diff --git a/Gopkg.lock b/Gopkg.lock index e9cb7fb6..1bbc4aa9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -199,6 +199,12 @@ packages = ["."] revision = "d0303fe809921458f417bcf828397a65db30a7e4" +[[projects]] + name = "github.com/newrelic/go-agent" + packages = [".","internal","internal/jsonx","internal/logger","internal/sysinfo","internal/utilization"] + revision = "29ec3cd1bb2f21d21d36da37dae52695cb2c3a17" + version = "v1.9.0" + [[projects]] name = "github.com/pierrec/lz4" packages = ["."] @@ -259,6 +265,12 @@ revision = "fde5e16d32adc7ad637e9cd9ad21d4ebc6192535" version = "v0.2.0" +[[projects]] + branch = "master" + name = "github.com/yadvendar/negroni-newrelic-go-agent" + packages = ["."] + revision = "3dc58758cb67abc45ae91e8e7bb3d90bdc254dfb" + [[projects]] branch = "master" name = "github.com/zhouzhuojie/conditions" @@ -304,6 +316,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "9f175ccb5020e16590edf15a2edc4019b32ca75ffe7f5feabd492bd1b0f55f43" + inputs-digest = "9754d750343158529e9f3b4a6c4c32b029991c920bb56c9a28fca0cfac011ec0" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 2381984d..915f326f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -112,3 +112,11 @@ [[constraint]] branch = "master" name = "github.com/meatballhat/negroni-logrus" + +[[constraint]] + name = "github.com/newrelic/go-agent" + version = "1.9.0" + +[[constraint]] + branch = "master" + name = "github.com/yadvendar/negroni-newrelic-go-agent" diff --git a/pkg/config/config.go b/pkg/config/config.go index f6c6642d..ab76f728 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -16,10 +16,15 @@ var Config = struct { DBDriver string `env:"FLAGR_DB_DBDRIVER" envDefault:"mysql"` DBConnectionStr string `env:"FLAGR_DB_DBCONNECTIONSTR" envDefault:"root:@tcp(127.0.0.1:18100)/flagr?parseTime=true"` - CORSEnabled bool `env:"FLAGR_CORS_ENABLED" envDefault:"true"` + CORSEnabled bool `env:"FLAGR_CORS_ENABLED" envDefault:"true"` + 
SentryEnabled bool `env:"FLAGR_SENTRY_ENABLED" envDefault:"false"` SentryDSN string `env:"FLAGR_SENTRY_DSN" envDefault:""` + NewRelicEnabled bool `env:"FLAGR_NEWRELIC_ENABLED" envDefault:"false"` + NewRelicAppName string `env:"FLAGR_NEWRELIC_NAME" envDefault:"flagr"` + NewRelicKey string `env:"FLAGR_NEWRELIC_KEY" envDefault:""` + EvalCacheRefreshTimeout time.Duration `env:"FLAGR_EVALCACHE_REFRESHTIMEOUT" envDefault:"59s"` EvalCacheRefreshInterval time.Duration `env:"FLAGR_EVALCACHE_REFRESHINTERVAL" envDefault:"3s"` diff --git a/swagger_gen/restapi/configure_flagr.go b/swagger_gen/restapi/configure_flagr.go index fa414504..5ab67851 100644 --- a/swagger_gen/restapi/configure_flagr.go +++ b/swagger_gen/restapi/configure_flagr.go @@ -19,6 +19,7 @@ import ( "github.com/rs/cors" "github.com/tylerb/graceful" "github.com/urfave/negroni" + newrelic "github.com/yadvendar/negroni-newrelic-go-agent" ) // This file is safe to edit. Once it exists it will not be overwritten @@ -26,9 +27,10 @@ import ( //go:generate swagger generate server --target ../swagger_gen --name --spec ../swagger.yml var ( - pwd, _ = os.Getwd() - enableCORS = config.Config.CORSEnabled - enablePProf = config.Config.PProfEnabled + pwd, _ = os.Getwd() + enableCORS = config.Config.CORSEnabled + enablePProf = config.Config.PProfEnabled + enableNewRelic = config.Config.NewRelicEnabled ) func configureFlags(api *operations.FlagrAPI) { @@ -79,6 +81,16 @@ func setupGlobalMiddleware(handler http.Handler) http.Handler { n.Use(c) } + if enableNewRelic { + nCfg := newrelic.NewConfig(config.Config.NewRelicAppName, config.Config.NewRelicKey) + nCfg.Enabled = true + newRelicMiddleware, err := newrelic.New(nCfg) + if err != nil { + logrus.Fatalf("unable to initialize newrelic. 
%s", err) + } + n.Use(newRelicMiddleware) + } + n.Use(negronilogrus.NewMiddlewareFromLogger(logrus.StandardLogger(), "flagr")) n.Use(negroni.NewRecovery()) n.Use(negroni.NewStatic(http.Dir(pwd + "/browser/flagr-ui/dist/"))) diff --git a/vendor/github.com/newrelic/go-agent/CHANGELOG.md b/vendor/github.com/newrelic/go-agent/CHANGELOG.md new file mode 100644 index 00000000..e4566281 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/CHANGELOG.md @@ -0,0 +1,192 @@ +## ChangeLog + +## 1.9.0 + +* Added support for [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) + in the new `nrgin` package. + * [Documentation](http://godoc.org/github.com/newrelic/go-agent/_integrations/nrgin/v1) + * [Example](examples/_gin/main.go) + +## 1.8.0 + +* Fixed incorrect metric rule application when the metric rule is flagged to + terminate and matches but the name is unchanged. + +* `Segment.End()`, `DatastoreSegment.End()`, and `ExternalSegment.End()` methods now return an + error which may be helpful in diagnosing situations where segment data is unexpectedly missing. + +## 1.7.0 + +* Added support for [gorilla/mux](http://github.com/gorilla/mux) in the new `nrgorilla` + package. + * [Documentation](http://godoc.org/github.com/newrelic/go-agent/_integrations/nrgorilla/v1) + * [Example](examples/_gorilla/main.go) + +## 1.6.0 + +* Added support for custom error messages and stack traces. Errors provided + to `Transaction.NoticeError` will now be checked to see if + they implement [ErrorClasser](https://godoc.org/github.com/newrelic/go-agent#ErrorClasser) + and/or [StackTracer](https://godoc.org/github.com/newrelic/go-agent#StackTracer). + Thanks to @fgrosse for this proposal. + +* Added support for [pkg/errors](https://github.com/pkg/errors). Thanks to + @fgrosse for this work. 
+ * [documentation](https://godoc.org/github.com/newrelic/go-agent/_integrations/nrpkgerrors) + * [example](https://github.com/newrelic/go-agent/blob/master/_integrations/nrpkgerrors/nrpkgerrors.go) + +* Fixed tests for Go 1.8. + +## 1.5.0 + +* Added support for Windows. Thanks to @ianomad and @lvxv for the contributions. + +* The number of heap objects allocated is recorded in the + `Memory/Heap/AllocatedObjects` metric. This will soon be displayed on the "Go + runtime" page. + +* If the [DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment) + fields `Host` and `PortPathOrID` are not provided, they will no longer appear + as `"unknown"` in transaction traces and slow query traces. + +* Stack traces will now be nicely aligned in the APM UI. + +## 1.4.0 + +* Added support for slow query traces. Slow datastore segments will now + generate slow query traces viewable on the datastore tab. These traces include + a stack trace and help you to debug slow datastore activity. + [Slow Query Documentation](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/viewing-slow-query-details) + +* Added new +[DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment) +fields `ParameterizedQuery`, `QueryParameters`, `Host`, `PortPathOrID`, and +`DatabaseName`. These fields will be shown in transaction traces and in slow +query traces. + +## 1.3.0 + +* Breaking Change: Added a timeout parameter to the `Application.Shutdown` method. + +## 1.2.0 + +* Added support for instrumenting short-lived processes: + * The new `Application.Shutdown` method allows applications to report + data to New Relic without waiting a full minute. + * The new `Application.WaitForConnection` method allows your process to + defer instrumentation until the application is connected and ready to + gather data. 
+ * Full documentation here: [application.go](application.go) + * Example short-lived process: [examples/short-lived-process/main.go](examples/short-lived-process/main.go) + +* Error metrics are no longer created when `ErrorCollector.Enabled = false`. + +* Added support for [github.com/mgutz/logxi](github.com/mgutz/logxi). See + [_integrations/nrlogxi/v1/nrlogxi.go](_integrations/nrlogxi/v1/nrlogxi.go). + +* Fixed bug where Transaction Trace thresholds based upon Apdex were not being + applied to background transactions. + +## 1.1.0 + +* Added support for Transaction Traces. + +* Stack trace filenames have been shortened: Any thing preceding the first + `/src/` is now removed. + +## 1.0.0 + +* Removed `BetaToken` from the `Config` structure. + +* Breaking Datastore Change: `datastore` package contents moved to top level + `newrelic` package. `datastore.MySQL` has become `newrelic.DatastoreMySQL`. + +* Breaking Attributes Change: `attributes` package contents moved to top + level `newrelic` package. `attributes.ResponseCode` has become + `newrelic.AttributeResponseCode`. Some attribute name constants have been + shortened. + +* Added "runtime.NumCPU" to the environment tab. Thanks sergeylanzman for the + contribution. + +* Prefixed the environment tab values "Compiler", "GOARCH", "GOOS", and + "Version" with "runtime.". + +## 0.8.0 + +* Breaking Segments API Changes: The segments API has been rewritten with the + goal of being easier to use and to avoid nil Transaction checks. See: + + * [segments.go](segments.go) + * [examples/server/main.go](examples/server/main.go) + * [GUIDE.md#segments](GUIDE.md#segments) + +* Updated LICENSE.txt with contribution information. + +## 0.7.1 + +* Fixed a bug causing the `Config` to fail to serialize into JSON when the + `Transport` field was populated. + +## 0.7.0 + +* Eliminated `api`, `version`, and `log` packages. `Version`, `Config`, + `Application`, and `Transaction` now live in the top level `newrelic` package. 
+ If you imported the `attributes` or `datastore` packages then you will need + to remove `api` from the import path. + +* Breaking Logging Changes + +Logging is no longer controlled though a single global. Instead, logging is +configured on a per-application basis with the new `Config.Logger` field. The +logger is an interface described in [log.go](log.go). See +[GUIDE.md#logging](GUIDE.md#logging). + +## 0.6.1 + +* No longer create "GC/System/Pauses" metric if no GC pauses happened. + +## 0.6.0 + +* Introduced beta token to support our beta program. + +* Rename `Config.Development` to `Config.Enabled` (and change boolean + direction). + +* Fixed a bug where exclusive time could be incorrect if segments were not + ended. + +* Fix unit tests broken in 1.6. + +* In `Config.Enabled = false` mode, the license must be the proper length or empty. + +* Added runtime statistics for CPU/memory usage, garbage collection, and number + of goroutines. + +## 0.5.0 + +* Added segment timing methods to `Transaction`. These methods must only be + used in a single goroutine. + +* The license length check will not be performed in `Development` mode. + +* Rename `SetLogFile` to `SetFile` to reduce redundancy. + +* Added `DebugEnabled` logging guard to reduce overhead. + +* `Transaction` now implements an `Ignore` method which will prevent + any of the transaction's data from being recorded. + +* `Transaction` now implements a subset of the interfaces + `http.CloseNotifier`, `http.Flusher`, `http.Hijacker`, and `io.ReaderFrom` + to match the behavior of its wrapped `http.ResponseWriter`. + +* Changed project name from `go-sdk` to `go-agent`. + +## 0.4.0 + +* Queue time support added: if the inbound request contains an +`"X-Request-Start"` or `"X-Queue-Start"` header with a unix timestamp, the +agent will report queue time metrics. Queue time will appear on the +application overview chart. 
The timestamp may fractional seconds, +milliseconds, or microseconds: the agent will deduce the correct units. diff --git a/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md b/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md new file mode 100644 index 00000000..d04bd5e7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Contributing + +You are welcome to send pull requests to us. By doing so you agree that you are +granting New Relic a non-exclusive, non-revokable, no-cost license to use the +code, algorithms, patents, and ideas in that code in our products if we so +choose. You also agree the code is provided as-is and you provide no warranties +as to its fitness or correctness for any purpose. + +* [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/newrelic/go-agent/GUIDE.md b/vendor/github.com/newrelic/go-agent/GUIDE.md new file mode 100644 index 00000000..7230db6b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/GUIDE.md @@ -0,0 +1,325 @@ +# New Relic Go Agent Guide + +* [Installation](#installation) +* [Config and Application](#config-and-application) +* [Logging](#logging) + * [logrus](#logrus) +* [Transactions](#transactions) +* [Segments](#segments) + * [Datastore Segments](#datastore-segments) + * [External Segments](#external-segments) +* [Attributes](#attributes) +* [Request Queuing](#request-queuing) + +## Installation + +Installing the Go Agent is the same as installing any other Go library. The +simplest way is to run: + +``` +go get github.com/newrelic/go-agent +``` + +Then import the `github.com/newrelic/go-agent` package in your application. + +## Config and Application + +* [config.go](config.go) +* [application.go](application.go) + +In your `main` function or in an `init` block: + +```go +config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +app, err := newrelic.NewApplication(config) +``` + +Find your application in the New Relic UI. 
Click on it to see the Go runtime +tab that shows information about goroutine counts, garbage collection, memory, +and CPU usage. + +If you are working in a development environment or running unit tests, you may +not want the Go Agent to spawn goroutines or report to New Relic. You're in +luck! Set the config's `Enabled` field to false. This makes the license key +optional. + +```go +config := newrelic.NewConfig("Your Application Name", "") +config.Enabled = false +app, err := newrelic.NewApplication(config) +``` + +## Logging + +* [log.go](log.go) + +The agent's logging system is designed to be easily extensible. By default, no +logging will occur. To enable logging, assign the `Config.Logger` field to +something implementing the `Logger` interface. A basic logging +implementation is included. + +To log at debug level to standard out, set: + +```go +config.Logger = newrelic.NewDebugLogger(os.Stdout) +``` + +To log at info level to a file, set: + +```go +w, err := os.OpenFile("my_log_file", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) +if nil == err { + config.Logger = newrelic.NewLogger(w) +} +``` + +### logrus + +* [_integrations/nrlogrus/nrlogrus.go](_integrations/nrlogrus/nrlogrus.go) + +If you are using `logrus` and would like to send the agent's log messages to its +standard logger, import the +`github.com/newrelic/go-agent/_integrations/nrlogrus` package, then set: + +```go +config.Logger = nrlogrus.StandardLogger() +``` + +## Transactions + +* [transaction.go](transaction.go) +* [More info on Transactions](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/transactions-page) + +Transactions time requests and background tasks. Each transaction should only +be used in a single goroutine. Start a new transaction when you spawn a new +goroutine. + +The simplest way to create transactions is to use +`Application.StartTransaction` and `Transaction.End`. 
+ +```go +txn := app.StartTransaction("transactionName", responseWriter, request) +defer txn.End() +``` + +The response writer and request parameters are optional. Leave them `nil` to +instrument a background task. + +```go +txn := app.StartTransaction("backgroundTask", nil, nil) +defer txn.End() +``` + +The transaction has helpful methods like `NoticeError` and `SetName`. +See more in [transaction.go](transaction.go). + +If you are using the `http` standard library package, use `WrapHandle` and +`WrapHandleFunc`. These wrappers automatically start and end transactions with +the request and response writer. See [instrumentation.go](instrumentation.go). + +```go +http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler)) +``` + +To access the transaction in your handler, use type assertion on the response +writer passed to the handler. + +```go +func myHandler(w http.ResponseWriter, r *http.Request) { + if txn, ok := w.(newrelic.Transaction); ok { + txn.NoticeError(errors.New("my error message")) + } +} +``` + +## Segments + +* [segments.go](segments.go) + +Find out where the time in your transactions is being spent! Each transaction +should only track segments in a single goroutine. + +`Segment` is used to instrument functions, methods, and blocks of code. A +segment begins when its `StartTime` field is populated, and finishes when its +`End` method is called. + +```go +segment := newrelic.Segment{} +segment.Name = "mySegmentName" +segment.StartTime = newrelic.StartSegmentNow(txn) +// ... code you want to time here ... +segment.End() +``` + +`StartSegment` is a convenient helper. It creates a segment and starts it: + +```go +segment := newrelic.StartSegment(txn, "mySegmentName") +// ... code you want to time here ... +segment.End() +``` + +Timing a function is easy using `StartSegment` and `defer`. 
Just add the +following line to the beginning of that function: + +```go +defer newrelic.StartSegment(txn, "mySegmentName").End() +``` + +Segments may be nested. The segment being ended must be the most recently +started segment. + +```go +s1 := newrelic.StartSegment(txn, "outerSegment") +s2 := newrelic.StartSegment(txn, "innerSegment") +// s2 must be ended before s1 +s2.End() +s1.End() +``` + +A zero value segment may safely be ended. Therefore, the following code +is safe even if the conditional fails: + +```go +var s newrelic.Segment +if txn, ok := w.(newrelic.Transaction); ok { + s.StartTime = newrelic.StartSegmentNow(txn), +} +// ... code you wish to time here ... +s.End() +``` + +### Datastore Segments + +Datastore segments appear in the transaction "Breakdown table" and in the +"Databases" tab. + +* [datastore.go](datastore.go) +* [More info on Databases tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/databases-slow-queries-page) + +Datastore segments are instrumented using `DatastoreSegment`. Just like basic +segments, datastore segments begin when the `StartTime` field is populated and +finish when the `End` method is called. Here is an example: + +```go +s := newrelic.DatastoreSegment{ + // Product is the datastore type. See the constants in datastore.go. + Product: newrelic.DatastoreMySQL, + // Collection is the table or group. + Collection: "my_table", + // Operation is the relevant action, e.g. "SELECT" or "GET". + Operation: "SELECT", +} +s.StartTime = newrelic.StartSegmentNow(txn) +// ... 
make the datastore call +s.End() +``` + +This may be combined into a single line when instrumenting a datastore call +that spans an entire function call: + +```go +defer newrelic.DatastoreSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Product: newrelic.DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", +}.End() +``` + +### External Segments + +External segments appear in the transaction "Breakdown table" and in the +"External services" tab. + +* [More info on External Services tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/external-services-page) + +External segments are instrumented using `ExternalSegment`. Populate either the +`URL` or `Request` field to indicate the endpoint. Here is an example: + +```go +func external(txn newrelic.Transaction, url string) (*http.Response, error) { + defer newrelic.ExternalSegment{ + StartTime: newrelic.StartSegmentNow(txn), + URL: url, + }.End() + + return http.Get(url) +} +``` + +We recommend using the `Request` and `Response` fields since they provide more +information about the external call. The `StartExternalSegment` helper is +useful when the request is available. This function may be modified in the +future to add headers that will trace activity between applications that are +instrumented by New Relic. + +```go +func external(txn newrelic.Transaction, req *http.Request) (*http.Response, error) { + s := newrelic.StartExternalSegment(txn, req) + response, err := http.DefaultClient.Do(req) + s.Response = response + s.End() + return response, err +} +``` + +`NewRoundTripper` is another useful helper. As with all segments, the round +tripper returned **must** only be used in the same goroutine as the transaction. + +```go +client := &http.Client{} +client.Transport = newrelic.NewRoundTripper(txn, nil) +resp, err := client.Get("http://example.com/") +``` + +## Attributes + +Attributes add context to errors and allow you to filter performance data +in Insights. 
+ +You may add them using the `Transaction.AddAttribute` method. + +```go +txn.AddAttribute("key", "value") +txn.AddAttribute("product", "widget") +txn.AddAttribute("price", 19.99) +txn.AddAttribute("importantCustomer", true) +``` + +* [More info on Custom Attributes](https://docs.newrelic.com/docs/insights/new-relic-insights/decorating-events/insights-custom-attributes) + +Some attributes are recorded automatically. These are called agent attributes. +They are listed here: + +* [attributes.go](attributes.go) + +To disable one of these agents attributes, `AttributeResponseCode` for +example, modify the config like this: + +```go +config.Attributes.Exclude = append(config.Attributes.Exclude, newrelic.AttributeResponseCode) +``` + +* [More info on Agent Attributes](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes) + +## Custom Events + +You may track arbitrary events using custom Insights events. + +```go +app.RecordCustomEvent("MyEventType", map[string]interface{}{ + "myString": "hello", + "myFloat": 0.603, + "myInt": 123, + "myBool": true, +}) +``` + +## Request Queuing + +If you are running a load balancer or reverse web proxy then you may configure +it to add a `X-Queue-Start` header with a Unix timestamp. This will create a +band on the application overview chart showing queue time. 
+ +* [More info on Request Queuing](https://docs.newrelic.com/docs/apm/applications-menu/features/request-queuing-tracking-front-end-time) diff --git a/vendor/github.com/newrelic/go-agent/LICENSE.txt b/vendor/github.com/newrelic/go-agent/LICENSE.txt new file mode 100644 index 00000000..8f55fde1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/LICENSE.txt @@ -0,0 +1,50 @@ +This product includes source derived from 'go' by The Go Authors, distributed +under the following BSD license: + + https://github.com/golang/go/blob/master/LICENSE + +------------------------------------------------------------------------------- + +All components of this product are Copyright (c) 2016 New Relic, Inc. All +rights reserved. + +Certain inventions disclosed in this file may be claimed within patents owned or +patent applications filed by New Relic, Inc. or third parties. + +Subject to the terms of this notice, New Relic grants you a nonexclusive, +nontransferable license, without the right to sublicense, to (a) install and +execute one copy of these files on any number of workstations owned or +controlled by you and (b) distribute verbatim copies of these files to third +parties. You may install, execute, and distribute these files and their +contents only in conjunction with your direct use of New Relic’s services. +These files and their contents shall not be used in conjunction with any other +product or software, including but not limited to those that may compete with +any New Relic product, feature, or software. As a condition to the foregoing +grant, you must provide this notice along with each copy you distribute and you +must not remove, alter, or obscure this notice. In the event you submit or +provide any feedback, code, pull requests, or suggestions to New Relic you +hereby grant New Relic a worldwide, non-exclusive, irrevocable, transferrable, +fully paid-up license to use the code, algorithms, patents, and ideas therein in +our products. 
+ +All other use, reproduction, modification, distribution, or other exploitation +of these files is strictly prohibited, except as may be set forth in a separate +written license agreement between you and New Relic. The terms of any such +license agreement will control over this notice. The license stated above will +be automatically terminated and revoked if you exceed its scope or violate any +of the terms of this notice. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of New Relic, except as required for reasonable +and customary use in describing the origin of this file and reproducing the +content of this notice. You may not mark or brand this file with any trade +name, trademarks, service marks, or product names other than the original brand +(if any) provided by New Relic. + +Unless otherwise expressly agreed by New Relic in a separate written license +agreement, these files are provided AS IS, WITHOUT WARRANTY OF ANY KIND, +including without any implied warranties of MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, TITLE, or NON-INFRINGEMENT. As a condition to your use of +these files, you are solely responsible for such use. New Relic will have no +liability to you for direct, indirect, consequential, incidental, special, or +punitive damages or for lost profits or data. diff --git a/vendor/github.com/newrelic/go-agent/README.md b/vendor/github.com/newrelic/go-agent/README.md new file mode 100644 index 00000000..88df0932 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/README.md @@ -0,0 +1,157 @@ +# New Relic Go Agent + +## Description + +The New Relic Go Agent allows you to monitor your Go applications with New +Relic. It helps you track transactions, outbound requests, database calls, and +other parts of your Go application's behavior and provides a running overview of +garbage collection, goroutine activity, and memory use. 
+ +## Requirements + +Go 1.3+ is required, due to the use of http.Client's Timeout field. + +Linux, OS X, and Windows (Vista, Server 2008 and later) are supported. + +## Getting Started + +Here are the basic steps to instrumenting your application. For more +information, see [GUIDE.md](GUIDE.md). + +#### Step 0: Installation + +Installing the Go Agent is the same as installing any other Go library. The +simplest way is to run: + +``` +go get github.com/newrelic/go-agent +``` + +Then import the `github.com/newrelic/go-agent` package in your application. + +#### Step 1: Create a Config and an Application + +In your `main` function or an `init` block: + +```go +config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +app, err := newrelic.NewApplication(config) +``` + +[more info](GUIDE.md#config-and-application), [application.go](application.go), +[config.go](config.go) + +#### Step 2: Add Transactions + +Transactions time requests and background tasks. Use `WrapHandle` and +`WrapHandleFunc` to create transactions for requests handled by the `http` +standard library package. + +```go +http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler)) +``` + +Alternatively, create transactions directly using the application's +`StartTransaction` method: + +```go +txn := app.StartTransaction("myTxn", optionalResponseWriter, optionalRequest) +defer txn.End() +``` + +[more info](GUIDE.md#transactions), [transaction.go](transaction.go) + +#### Step 3: Instrument Segments + +Segments show you where time in your transactions is being spent. At the +beginning of important functions, add: + +```go +defer newrelic.StartSegment(txn, "mySegmentName").End() +``` + +[more info](GUIDE.md#segments), [segments.go](segments.go) + +## Runnable Example + +[examples/server/main.go](./examples/server/main.go) is an example that will +appear as "Example App" in your New Relic applications list. 
To run it: + +``` +env NEW_RELIC_LICENSE_KEY=__YOUR_NEW_RELIC_LICENSE_KEY__LICENSE__ \ + go run examples/server/main.go +``` + +Some endpoints exposed are [http://localhost:8000/](http://localhost:8000/) +and [http://localhost:8000/notice_error](http://localhost:8000/notice_error) + + +## Basic Example + +Before Instrumentation + +```go +package main + +import ( + "io" + "net/http" +) + +func helloHandler(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello, world") +} + +func main() { + http.HandleFunc("/", helloHandler) + http.ListenAndServe(":8000", nil) +} +``` + +After Instrumentation + +```go +package main + +import ( + "fmt" + "io" + "net/http" + "os" + + "github.com/newrelic/go-agent" +) + +func helloHandler(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello, world") +} + +func main() { + // Create a config. You need to provide the desired application name + // and your New Relic license key. + cfg := newrelic.NewConfig("Example App", "__YOUR_NEW_RELIC_LICENSE_KEY__") + + // Create an application. This represents an application in the New + // Relic UI. + app, err := newrelic.NewApplication(cfg) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Wrap helloHandler. The performance of this handler will be recorded. + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", helloHandler)) + http.ListenAndServe(":8000", nil) +} +``` + +## Support + +You can find more detailed documentation [in the guide](GUIDE.md). + +If you can't find what you're looking for there, reach out to us on our [support +site](http://support.newrelic.com/) or our [community +forum](http://forum.newrelic.com) and we'll be happy to help you. + +Find a bug? Contact us via [support.newrelic.com](http://support.newrelic.com/), +or email support@newrelic.com. 
diff --git a/vendor/github.com/newrelic/go-agent/_integrations/nrgin/v1/nrgin.go b/vendor/github.com/newrelic/go-agent/_integrations/nrgin/v1/nrgin.go new file mode 100644 index 00000000..397812ef --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/_integrations/nrgin/v1/nrgin.go @@ -0,0 +1,99 @@ +// Package nrgin introduces middleware to support the Gin framework. +// +// router := gin.Default() +// router.Use(nrgin.Middleware(app)) +// +package nrgin + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "gin", "v1") } + +// headerResponseWriter exists to give the transaction access to response +// headers. +type headerResponseWriter struct{ w gin.ResponseWriter } + +func (w *headerResponseWriter) Header() http.Header { return w.w.Header() } +func (w *headerResponseWriter) Write([]byte) (int, error) { return 0, nil } +func (w *headerResponseWriter) WriteHeader(int) {} + +var _ http.ResponseWriter = &headerResponseWriter{} + +type replacementResponseWriter struct { + gin.ResponseWriter + txn newrelic.Transaction + code int + written bool +} + +var _ gin.ResponseWriter = &replacementResponseWriter{} + +func (w *replacementResponseWriter) flushHeader() { + if !w.written { + w.txn.WriteHeader(w.code) + w.written = true + } +} + +func (w *replacementResponseWriter) WriteHeader(code int) { + w.code = code + w.ResponseWriter.WriteHeader(code) +} + +func (w *replacementResponseWriter) Write(data []byte) (int, error) { + w.flushHeader() + return w.ResponseWriter.Write(data) +} + +func (w *replacementResponseWriter) WriteString(s string) (int, error) { + w.flushHeader() + return w.ResponseWriter.WriteString(s) +} + +func (w *replacementResponseWriter) WriteHeaderNow() { + w.flushHeader() + w.ResponseWriter.WriteHeaderNow() +} + +var ( + ctxKey = "newRelicTransaction" +) + +// Transaction returns the transaction stored inside 
the context, or nil if not +// found. +func Transaction(c *gin.Context) newrelic.Transaction { + if v, exists := c.Get(ctxKey); exists { + if txn, ok := v.(newrelic.Transaction); ok { + return txn + } + } + return nil +} + +// Middleware creates Gin middleware that instruments requests. +// +// router := gin.Default() +// router.Use(nrgin.Middleware(app)) +// +func Middleware(app newrelic.Application) gin.HandlerFunc { + return func(c *gin.Context) { + name := c.HandlerName() + w := &headerResponseWriter{w: c.Writer} + txn := app.StartTransaction(name, w, c.Request) + defer txn.End() + + c.Writer = &replacementResponseWriter{ + ResponseWriter: c.Writer, + txn: txn, + code: http.StatusOK, + } + c.Set(ctxKey, txn) + c.Next() + } +} diff --git a/vendor/github.com/newrelic/go-agent/_integrations/nrgorilla/v1/nrgorilla.go b/vendor/github.com/newrelic/go-agent/_integrations/nrgorilla/v1/nrgorilla.go new file mode 100644 index 00000000..fbaacb4c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/_integrations/nrgorilla/v1/nrgorilla.go @@ -0,0 +1,65 @@ +// Package nrgorilla introduces to support for the gorilla/mux framework. See +// examples/_gorilla/main.go for an example. 
+package nrgorilla + +import ( + "net/http" + + "github.com/gorilla/mux" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "framework", "gorilla", "v1") } + +type instrumentedHandler struct { + name string + app newrelic.Application + orig http.Handler +} + +func (h instrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + txn := h.app.StartTransaction(h.name, w, r) + defer txn.End() + + h.orig.ServeHTTP(txn, r) +} + +func instrumentRoute(h http.Handler, app newrelic.Application, name string) http.Handler { + if _, ok := h.(instrumentedHandler); ok { + return h + } + return instrumentedHandler{ + name: name, + orig: h, + app: app, + } +} + +func routeName(route *mux.Route) string { + if nil == route { + return "" + } + if n := route.GetName(); n != "" { + return n + } + if n, _ := route.GetPathTemplate(); n != "" { + return n + } + n, _ := route.GetHostTemplate() + return n +} + +// InstrumentRoutes adds instrumentation to a router. This must be used after +// the routes have been added to the router. +func InstrumentRoutes(r *mux.Router, app newrelic.Application) *mux.Router { + r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + h := instrumentRoute(route.GetHandler(), app, routeName(route)) + route.Handler(h) + return nil + }) + if nil != r.NotFoundHandler { + r.NotFoundHandler = instrumentRoute(r.NotFoundHandler, app, "NotFoundHandler") + } + return r +} diff --git a/vendor/github.com/newrelic/go-agent/_integrations/nrlogrus/nrlogrus.go b/vendor/github.com/newrelic/go-agent/_integrations/nrlogrus/nrlogrus.go new file mode 100644 index 00000000..8d7cfabb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/_integrations/nrlogrus/nrlogrus.go @@ -0,0 +1,47 @@ +// Package nrlogrus forwards go-agent log messages to logrus. 
If you are using +// logrus for your application and would like the go-agent log messages to end +// up in the same place, modify your config as follows: +// +// cfg.Logger = nrlogrus.StandardLogger() +// +// Only logrus' StandardLogger is supported since there is no method (as of July +// 2016) to get the level of a logrus.Logger. See +// https://github.com/Sirupsen/logrus/issues/241 +package nrlogrus + +import ( + "github.com/Sirupsen/logrus" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "logging", "logrus") } + +type shim struct{ e *logrus.Entry } + +func (s *shim) Error(msg string, c map[string]interface{}) { + s.e.WithFields(c).Error(msg) +} +func (s *shim) Warn(msg string, c map[string]interface{}) { + s.e.WithFields(c).Warn(msg) +} +func (s *shim) Info(msg string, c map[string]interface{}) { + s.e.WithFields(c).Info(msg) +} +func (s *shim) Debug(msg string, c map[string]interface{}) { + s.e.WithFields(c).Info(msg) +} +func (s *shim) DebugEnabled() bool { + lvl := logrus.GetLevel() + return lvl >= logrus.DebugLevel +} + +// StandardLogger returns a newrelic.Logger which forwards agent log messages to +// the logrus package-level exported logger. +func StandardLogger() newrelic.Logger { + return &shim{ + e: logrus.WithFields(logrus.Fields{ + "component": "newrelic", + }), + } +} diff --git a/vendor/github.com/newrelic/go-agent/_integrations/nrlogxi/v1/nrlogxi.go b/vendor/github.com/newrelic/go-agent/_integrations/nrlogxi/v1/nrlogxi.go new file mode 100644 index 00000000..e6c84565 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/_integrations/nrlogxi/v1/nrlogxi.go @@ -0,0 +1,53 @@ +// Package nrlogxi forwards go-agent log messages to mgutz/logxi. If you would +// like to use mgutz/logxi for go-agent log messages, wrap your logxi Logger +// using nrlogxi.New to create a newrelic.Logger. 
+// +// l := log.New("newrelic") +// l.SetLevel(log.LevelInfo) +// cfg.Logger = nrlogxi.New(l) +// +package nrlogxi + +import ( + "github.com/mgutz/logxi/v1" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" +) + +func init() { internal.TrackUsage("integration", "logging", "logxi", "v1") } + +type shim struct { + e log.Logger +} + +func (l *shim) Error(msg string, context map[string]interface{}) { + l.e.Error(msg, convert(context)...) +} +func (l *shim) Warn(msg string, context map[string]interface{}) { + l.e.Warn(msg, convert(context)...) +} +func (l *shim) Info(msg string, context map[string]interface{}) { + l.e.Info(msg, convert(context)...) +} +func (l *shim) Debug(msg string, context map[string]interface{}) { + l.e.Debug(msg, convert(context)...) +} +func (l *shim) DebugEnabled() bool { + return l.e.IsDebug() +} + +func convert(c map[string]interface{}) []interface{} { + output := make([]interface{}, 0, 2*len(c)) + for k, v := range c { + output = append(output, k, v) + } + return output +} + +// New returns a newrelic.Logger which forwards agent log messages to the +// provided logxi Logger. +func New(l log.Logger) newrelic.Logger { + return &shim{ + e: l, + } +} diff --git a/vendor/github.com/newrelic/go-agent/_integrations/nrpkgerrors/nrpkgerrors.go b/vendor/github.com/newrelic/go-agent/_integrations/nrpkgerrors/nrpkgerrors.go new file mode 100644 index 00000000..75e57da0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/_integrations/nrpkgerrors/nrpkgerrors.go @@ -0,0 +1,76 @@ +// Package nrpkgerrors introduces support for github.com/pkg/errors. +package nrpkgerrors + +import ( + "fmt" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal" + "github.com/pkg/errors" +) + +func init() { internal.TrackUsage("integration", "pkg-errors") } + +type nrpkgerror struct { + error +} + +// stackTracer is an error that also knows about its StackTrace. 
+// All wrapped errors from github.com/pkg/errors implement this interface. +type stackTracer interface { + StackTrace() errors.StackTrace +} + +func deepestStackTrace(err error) errors.StackTrace { + var last stackTracer + for err != nil { + if err, ok := err.(stackTracer); ok { + last = err + } + cause, ok := err.(interface { + Cause() error + }) + if !ok { + break + } + err = cause.Cause() + } + + if last == nil { + return nil + } + return last.StackTrace() +} + +func transformStackTrace(orig errors.StackTrace) []uintptr { + st := make([]uintptr, len(orig)) + for i, frame := range orig { + st[i] = uintptr(frame) + } + return st +} + +func (e nrpkgerror) StackTrace() []uintptr { + st := deepestStackTrace(e.error) + if nil == st { + return nil + } + return transformStackTrace(st) +} + +func (e nrpkgerror) ErrorClass() string { + if ec, ok := e.error.(newrelic.ErrorClasser); ok { + return ec.ErrorClass() + } + cause := errors.Cause(e.error) + if ec, ok := cause.(newrelic.ErrorClasser); ok { + return ec.ErrorClass() + } + return fmt.Sprintf("%T", cause) +} + +// Wrap wraps an error from github.com/pkg/errors so that the stacktrace +// provided by the error matches the format expected by the newrelic package. +func Wrap(e error) error { + return nrpkgerror{e} +} diff --git a/vendor/github.com/newrelic/go-agent/application.go b/vendor/github.com/newrelic/go-agent/application.go new file mode 100644 index 00000000..9cd6d4ff --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/application.go @@ -0,0 +1,58 @@ +package newrelic + +import ( + "net/http" + "time" +) + +// Application represents your application. +type Application interface { + // StartTransaction begins a Transaction. + // * The Transaction should only be used in a single goroutine. + // * This method never returns nil. + // * If an http.Request is provided then the Transaction is considered + // a web transaction. 
+ // * If an http.ResponseWriter is provided then the Transaction can be + // used in its place. This allows instrumentation of the response + // code and response headers. + StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction + + // RecordCustomEvent adds a custom event to the application. This + // feature is incompatible with high security mode. + // + // eventType must consist of alphanumeric characters, underscores, and + // colons, and must contain fewer than 255 bytes. + // + // Each value in the params map must be a number, string, or boolean. + // Keys must be less than 255 bytes. The params map may not contain + // more than 64 attributes. For more information, and a set of + // restricted keywords, see: + // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + RecordCustomEvent(eventType string, params map[string]interface{}) error + + // WaitForConnection blocks until the application is connected, is + // incapable of being connected, or the timeout has been reached. This + // method is useful for short-lived processes since the application will + // not gather data until it is connected. nil is returned if the + // application is connected successfully. + WaitForConnection(timeout time.Duration) error + + // Shutdown flushes data to New Relic's servers and stops all + // agent-related goroutines managing this application. After Shutdown + // is called, the application is disabled and no more data will be + // collected. This method will block until all final data is sent to + // New Relic or the timeout has elapsed. + Shutdown(timeout time.Duration) +} + +// NewApplication creates an Application and spawns goroutines to manage the +// aggregation and harvesting of data. On success, a non-nil Application and a +// nil error are returned. On failure, a nil Application and a non-nil error +// are returned. 
+// +// Applications do not share global state (other than the shared log.Logger). +// Therefore, it is safe to create multiple applications. +func NewApplication(c Config) (Application, error) { + return newApp(c) +} diff --git a/vendor/github.com/newrelic/go-agent/attributes.go b/vendor/github.com/newrelic/go-agent/attributes.go new file mode 100644 index 00000000..f5f2761a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/attributes.go @@ -0,0 +1,42 @@ +package newrelic + +// This file contains the names of the automatically captured attributes. +// Attributes are key value pairs attached to transaction events, error events, +// and traced errors. You may add your own attributes using the +// Transaction.AddAttribute method (see transaction.go). +// +// These attribute names are exposed here to facilitate configuration. +// +// For more information, see: +// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes + +// Attributes destined for Transaction Events and Errors: +const ( + // AttributeResponseCode is the response status code for a web request. + AttributeResponseCode = "httpResponseCode" + // AttributeRequestMethod is the request's method. + AttributeRequestMethod = "request.method" + // AttributeRequestAccept is the request's "Accept" header. + AttributeRequestAccept = "request.headers.accept" + // AttributeRequestContentType is the request's "Content-Type" header. + AttributeRequestContentType = "request.headers.contentType" + // AttributeRequestContentLength is the request's "Content-Length" header. + AttributeRequestContentLength = "request.headers.contentLength" + // AttributeRequestHost is the request's "Host" header. + AttributeRequestHost = "request.headers.host" + // AttributeResponseContentType is the response "Content-Type" header. + AttributeResponseContentType = "response.headers.contentType" + // AttributeResponseContentLength is the response "Content-Length" header. 
+ AttributeResponseContentLength = "response.headers.contentLength" + // AttributeHostDisplayName contains the value of Config.HostDisplayName. + AttributeHostDisplayName = "host.displayName" +) + +// Attributes destined for Errors: +const ( + // AttributeRequestUserAgent is the request's "User-Agent" header. + AttributeRequestUserAgent = "request.headers.User-Agent" + // AttributeRequestReferer is the request's "Referer" header. Query + // string parameters are removed. + AttributeRequestReferer = "request.headers.referer" +) diff --git a/vendor/github.com/newrelic/go-agent/config.go b/vendor/github.com/newrelic/go-agent/config.go new file mode 100644 index 00000000..af8d8c37 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/config.go @@ -0,0 +1,257 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// Config contains Application and Transaction behavior settings. +// Use NewConfig to create a Config with proper defaults. +type Config struct { + // AppName is used by New Relic to link data across servers. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application + AppName string + + // License is your New Relic license key. + // + // https://docs.newrelic.com/docs/accounts-partnerships/accounts/account-setup/license-key + License string + + // Logger controls go-agent logging. See log.go. + Logger Logger + + // Enabled determines whether the agent will communicate with the New + // Relic servers and spawn goroutines. Setting this to be false can be + // useful in testing and staging situations. + Enabled bool + + // Labels are key value pairs used to roll up applications into specific + // categories. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/labels-categories-organizing-your-apps-servers + Labels map[string]string + + // HighSecurity guarantees that certain agent settings can not be made + // more permissive. 
This setting must match the corresponding account + // setting in the New Relic UI. + // + // https://docs.newrelic.com/docs/accounts-partnerships/accounts/security/high-security + HighSecurity bool + + // CustomInsightsEvents controls the behavior of + // Application.RecordCustomEvent. + // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + CustomInsightsEvents struct { + // Enabled controls whether RecordCustomEvent will collect + // custom analytics events. High security mode overrides this + // setting. + Enabled bool + } + + // TransactionEvents controls the behavior of transaction analytics + // events. + TransactionEvents struct { + // Enabled controls whether transaction events are captured. + Enabled bool + // Attributes controls the attributes included with transaction + // events. + Attributes AttributeDestinationConfig + } + + // ErrorCollector controls the capture of errors. + ErrorCollector struct { + // Enabled controls whether errors are captured. This setting + // affects both traced errors and error analytics events. + Enabled bool + // CaptureEvents controls whether error analytics events are + // captured. + CaptureEvents bool + // IgnoreStatusCodes controls which http response codes are + // automatically turned into errors. By default, response codes + // greater than or equal to 400, with the exception of 404, are + // turned into errors. + IgnoreStatusCodes []int + // Attributes controls the attributes included with errors. + Attributes AttributeDestinationConfig + } + + // TransactionTracer controls the capture of transaction traces. + TransactionTracer struct { + // Enabled controls whether transaction traces are captured. + Enabled bool + // Threshold controls whether a transaction trace will be + // considered for capture. Of the traces exceeding the + // threshold, the slowest trace every minute is captured. 
+ Threshold struct { + // If IsApdexFailing is true then the trace threshold is + // four times the apdex threshold. + IsApdexFailing bool + // If IsApdexFailing is false then this field is the + // threshold, otherwise it is ignored. + Duration time.Duration + } + // SegmentThreshold is the threshold at which segments will be + // added to the trace. Lowering this setting may increase + // overhead. + SegmentThreshold time.Duration + // StackTraceThreshold is the threshold at which segments will + // be given a stack trace in the transaction trace. Lowering + // this setting will drastically increase overhead. + StackTraceThreshold time.Duration + // Attributes controls the attributes included with transaction + // traces. + Attributes AttributeDestinationConfig + } + + // HostDisplayName gives this server a recognizable name in the New + // Relic UI. This is an optional setting. + HostDisplayName string + + // UseTLS controls whether http or https is used to send data to New + // Relic servers. + UseTLS bool + + // Transport customizes http.Client communication with New Relic + // servers. This may be used to configure a proxy. + Transport http.RoundTripper + + // Utilization controls the detection and gathering of system + // information. + Utilization struct { + // DetectAWS controls whether the Application attempts to detect + // AWS. + DetectAWS bool + // DetectDocker controls whether the Application attempts to + // detect Docker. + DetectDocker bool + + // These settings provide system information when custom values + // are required. + LogicalProcessors int + TotalRAMMIB int + BillingHostname string + } + + // DatastoreTracer controls behavior relating to datastore segments. + DatastoreTracer struct { + InstanceReporting struct { + Enabled bool + } + DatabaseNameReporting struct { + Enabled bool + } + QueryParameters struct { + Enabled bool + } + // SlowQuery controls the capture of slow query traces. 
Slow + // query traces show you instances of your slowest datastore + // segments. + SlowQuery struct { + Enabled bool + Threshold time.Duration + } + } + + // Attributes controls the attributes included with errors and + // transaction events. + Attributes AttributeDestinationConfig + + // RuntimeSampler controls the collection of runtime statistics like + // CPU/Memory usage, goroutine count, and GC pauses. + RuntimeSampler struct { + // Enabled controls whether runtime statistics are captured. + Enabled bool + } +} + +// AttributeDestinationConfig controls the attributes included with errors and +// transaction events. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +// NewConfig creates an Config populated with the given appname, license, +// and expected default values. +func NewConfig(appname, license string) Config { + c := Config{} + + c.AppName = appname + c.License = license + c.Enabled = true + c.Labels = make(map[string]string) + c.CustomInsightsEvents.Enabled = true + c.TransactionEvents.Enabled = true + c.TransactionEvents.Attributes.Enabled = true + c.HighSecurity = false + c.UseTLS = true + c.ErrorCollector.Enabled = true + c.ErrorCollector.CaptureEvents = true + c.ErrorCollector.IgnoreStatusCodes = []int{ + http.StatusNotFound, // 404 + } + c.ErrorCollector.Attributes.Enabled = true + c.Utilization.DetectAWS = true + c.Utilization.DetectDocker = true + c.Attributes.Enabled = true + c.RuntimeSampler.Enabled = true + + c.TransactionTracer.Enabled = true + c.TransactionTracer.Threshold.IsApdexFailing = true + c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond + c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond + c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond + c.TransactionTracer.Attributes.Enabled = true + + c.DatastoreTracer.InstanceReporting.Enabled = true + c.DatastoreTracer.DatabaseNameReporting.Enabled = true + c.DatastoreTracer.QueryParameters.Enabled = 
true + c.DatastoreTracer.SlowQuery.Enabled = true + c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond + + return c +} + +const ( + licenseLength = 40 + appNameLimit = 3 +) + +// The following errors will be returned if your Config fails to validate. +var ( + errLicenseLen = fmt.Errorf("license length is not %d", licenseLength) + errHighSecurityTLS = errors.New("high security requires TLS") + errAppNameMissing = errors.New("AppName required") + errAppNameLimit = fmt.Errorf("max of %d rollup application names", appNameLimit) +) + +// Validate checks the config for improper fields. If the config is invalid, +// newrelic.NewApplication returns an error. +func (c Config) Validate() error { + if c.Enabled { + if len(c.License) != licenseLength { + return errLicenseLen + } + } else { + // The License may be empty when the agent is not enabled. + if len(c.License) != licenseLength && len(c.License) != 0 { + return errLicenseLen + } + } + if c.HighSecurity && !c.UseTLS { + return errHighSecurityTLS + } + if "" == c.AppName { + return errAppNameMissing + } + if strings.Count(c.AppName, ";") >= appNameLimit { + return errAppNameLimit + } + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/datastore.go b/vendor/github.com/newrelic/go-agent/datastore.go new file mode 100644 index 00000000..6a2db240 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/datastore.go @@ -0,0 +1,27 @@ +package newrelic + +// DatastoreProduct encourages consistent metrics across New Relic agents. You +// may create your own if your datastore is not listed below. 
// DatastoreProduct encourages consistent metrics across New Relic agents. You
// may create your own if your datastore is not listed below.
type DatastoreProduct string

// Datastore names used across New Relic agents:
//
// NOTE(review): only DatastoreCassandra is explicitly declared as
// DatastoreProduct; the remaining names are untyped string constants. They
// remain assignable wherever a DatastoreProduct is expected, but confirm the
// mixed typing is intentional before relying on their static type.
const (
	DatastoreCassandra DatastoreProduct = "Cassandra"
	DatastoreDerby                      = "Derby"
	DatastoreElasticsearch              = "Elasticsearch"
	DatastoreFirebird                   = "Firebird"
	DatastoreIBMDB2                     = "IBMDB2"
	DatastoreInformix                   = "Informix"
	DatastoreMemcached                  = "Memcached"
	DatastoreMongoDB                    = "MongoDB"
	DatastoreMySQL                      = "MySQL"
	DatastoreMSSQL                      = "MSSQL"
	DatastoreOracle                     = "Oracle"
	DatastorePostgres                   = "Postgres"
	DatastoreRedis                      = "Redis"
	DatastoreSolr                       = "Solr"
	DatastoreSQLite                     = "SQLite"
	DatastoreCouchDB                    = "CouchDB"
	DatastoreRiak                       = "Riak"
	DatastoreVoltDB                     = "VoltDB"
)

// StackTracer is type that can be implemented by errors to provide a stack
// trace when using Transaction.NoticeError.
type StackTracer interface {
	// StackTrace returns the program counters of the error's origin.
	StackTrace() []uintptr
}
+type ErrorClasser interface { + ErrorClass() string +} diff --git a/vendor/github.com/newrelic/go-agent/examples/_gin/main.go b/vendor/github.com/newrelic/go-agent/examples/_gin/main.go new file mode 100644 index 00000000..3dfb4fd0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/_gin/main.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "os" + + "github.com/gin-gonic/gin" + "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrgin/v1" +) + +func makeGinEndpoint(s string) func(*gin.Context) { + return func(c *gin.Context) { + c.Writer.WriteString(s) + } +} + +func v1login(c *gin.Context) { c.Writer.WriteString("v1 login") } +func v1submit(c *gin.Context) { c.Writer.WriteString("v1 submit") } +func v1read(c *gin.Context) { c.Writer.WriteString("v1 read") } + +func endpoint404(c *gin.Context) { + c.Writer.WriteHeader(404) + c.Writer.WriteString("returning 404") +} + +func endpointChangeCode(c *gin.Context) { + // gin.ResponseWriter buffers the response code so that it can be + // changed before the first write. + c.Writer.WriteHeader(404) + c.Writer.WriteHeader(200) + c.Writer.WriteString("actually ok!") +} + +func endpointResponseHeaders(c *gin.Context) { + // Since gin.ResponseWriter buffers the response code, response headers + // can be set afterwards. 
+ c.Writer.WriteHeader(200) + c.Writer.Header().Set("Content-Type", "application/json") + c.Writer.WriteString(`{"zip":"zap"}`) +} + +func endpointNotFound(c *gin.Context) { + c.Writer.WriteString("there's no endpoint for that!") +} + +func endpointAccessTransaction(c *gin.Context) { + if txn := nrgin.Transaction(c); nil != txn { + txn.SetName("custom-name") + } + c.Writer.WriteString("changed the name of the transaction!") +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Gin App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + router := gin.Default() + router.Use(nrgin.Middleware(app)) + + router.GET("/404", endpoint404) + router.GET("/change", endpointChangeCode) + router.GET("/headers", endpointResponseHeaders) + router.GET("/txn", endpointAccessTransaction) + + // Since the handler function name is used as the transaction name, + // anonymous functions do not get usefully named. We encourage + // transforming anonymous functions into named functions. 
+ router.GET("/anon", func(c *gin.Context) { + c.Writer.WriteString("anonymous function handler") + }) + + v1 := router.Group("/v1") + v1.GET("/login", v1login) + v1.GET("/submit", v1submit) + v1.GET("/read", v1read) + + router.NoRoute(endpointNotFound) + + router.Run(":8000") +} diff --git a/vendor/github.com/newrelic/go-agent/examples/_gorilla/main.go b/vendor/github.com/newrelic/go-agent/examples/_gorilla/main.go new file mode 100644 index 00000000..a9e9404a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/_gorilla/main.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/gorilla/mux" + newrelic "github.com/newrelic/go-agent" + nrgorilla "github.com/newrelic/go-agent/_integrations/nrgorilla/v1" +) + +func makeHandler(text string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(text)) + }) +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Gorilla App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + r := mux.NewRouter() + r.Handle("/", makeHandler("index")) + r.Handle("/alpha", makeHandler("alpha")) + + users := r.PathPrefix("/users").Subrouter() + users.Handle("/add", makeHandler("adding user")) + users.Handle("/delete", makeHandler("deleting user")) + + // The route name will be used as the transaction name if one is set. + r.Handle("/named", makeHandler("named route")).Name("special-name-route") + + // The NotFoundHandler will be instrumented if it is set. 
+ r.NotFoundHandler = makeHandler("not found") + + http.ListenAndServe(":8000", nrgorilla.InstrumentRoutes(r, app)) +} diff --git a/vendor/github.com/newrelic/go-agent/examples/_logrus/main.go b/vendor/github.com/newrelic/go-agent/examples/_logrus/main.go new file mode 100644 index 00000000..8516a2b5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/_logrus/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "io" + "net/http" + "os" + + "github.com/Sirupsen/logrus" + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrlogrus" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Logrus App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + logrus.SetLevel(logrus.DebugLevel) + cfg.Logger = nrlogrus.StandardLogger() + + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello world") + })) + + http.ListenAndServe(":8000", nil) +} diff --git a/vendor/github.com/newrelic/go-agent/examples/_pkgerrors/main.go b/vendor/github.com/newrelic/go-agent/examples/_pkgerrors/main.go new file mode 100644 index 00000000..8d9279cc --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/_pkgerrors/main.go @@ -0,0 +1,57 @@ +package main + +import ( + "fmt" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/_integrations/nrpkgerrors" + "github.com/pkg/errors" +) + +type sampleError string + +func (e sampleError) Error() string { + return string(e) +} + +func alpha() error { + return errors.WithStack(sampleError("alpha is the cause")) +} + +func beta() error { + return errors.WithStack(alpha()) +} + +func gamma() error { + return errors.Wrap(beta(), "gamma was involved") +} 
+ +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("pkg/errors app", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + txn := app.StartTransaction("has-error", nil, nil) + e := gamma() + txn.NoticeError(nrpkgerrors.Wrap(e)) + txn.End() + + app.Shutdown(10 * time.Second) +} diff --git a/vendor/github.com/newrelic/go-agent/examples/server/main.go b/vendor/github.com/newrelic/go-agent/examples/server/main.go new file mode 100644 index 00000000..38678111 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/server/main.go @@ -0,0 +1,189 @@ +package main + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net/http" + "os" + "time" + + newrelic "github.com/newrelic/go-agent" +) + +var ( + app newrelic.Application +) + +func index(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "hello world") +} + +func versionHandler(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "New Relic Go Agent Version: "+newrelic.Version) +} + +func noticeError(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "noticing an error") + + if txn, ok := w.(newrelic.Transaction); ok { + txn.NoticeError(errors.New("my error message")) + } +} + +func customEvent(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "recording a custom event") + + app.RecordCustomEvent("my_event_type", map[string]interface{}{ + "myString": "hello", + "myFloat": 0.603, + "myInt": 123, + "myBool": true, + }) +} + +func setName(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "changing the transaction's name") + + if txn, ok := w.(newrelic.Transaction); ok { + 
txn.SetName("other-name") + } +} + +func addAttribute(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "adding attributes") + + if txn, ok := w.(newrelic.Transaction); ok { + txn.AddAttribute("myString", "hello") + txn.AddAttribute("myInt", 123) + } +} + +func background(w http.ResponseWriter, r *http.Request) { + // Transactions started without an http.Request are classified as + // background transactions. + txn := app.StartTransaction("background", nil, nil) + defer txn.End() + + io.WriteString(w, "background transaction") + time.Sleep(150 * time.Millisecond) +} + +func ignore(w http.ResponseWriter, r *http.Request) { + if coinFlip := (0 == rand.Intn(2)); coinFlip { + if txn, ok := w.(newrelic.Transaction); ok { + txn.Ignore() + } + io.WriteString(w, "ignoring the transaction") + } else { + io.WriteString(w, "not ignoring the transaction") + } +} + +func segments(w http.ResponseWriter, r *http.Request) { + txn, _ := w.(newrelic.Transaction) + + func() { + defer newrelic.StartSegment(txn, "f1").End() + + func() { + defer newrelic.StartSegment(txn, "f2").End() + + io.WriteString(w, "segments!") + time.Sleep(10 * time.Millisecond) + }() + time.Sleep(15 * time.Millisecond) + }() + time.Sleep(20 * time.Millisecond) +} + +func mysql(w http.ResponseWriter, r *http.Request) { + txn, _ := w.(newrelic.Transaction) + s := newrelic.DatastoreSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Product: newrelic.DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: map[string]interface{}{ + "name": "Dracula", + "age": 439, + }, + Host: "mysql-server-1", + PortPathOrID: "3306", + DatabaseName: "my_database", + } + defer s.End() + + time.Sleep(20 * time.Millisecond) + io.WriteString(w, `performing fake query "INSERT * from users"`) +} + +func external(w http.ResponseWriter, r *http.Request) { + url := "http://example.com/" + txn, _ := w.(newrelic.Transaction) + // 
This demonstrates an external segment where only the URL is known. If + // an http.Request is accessible then `StartExternalSegment` is + // recommended. See the implementation of `NewRoundTripper` for an + // example. + defer newrelic.ExternalSegment{ + StartTime: newrelic.StartSegmentNow(txn), + URL: url, + }.End() + + resp, err := http.Get(url) + if nil != err { + io.WriteString(w, err.Error()) + return + } + defer resp.Body.Close() + io.Copy(w, resp.Body) +} + +func roundtripper(w http.ResponseWriter, r *http.Request) { + client := &http.Client{} + txn, _ := w.(newrelic.Transaction) + client.Transport = newrelic.NewRoundTripper(txn, nil) + resp, err := client.Get("http://example.com/") + if nil != err { + io.WriteString(w, err.Error()) + return + } + defer resp.Body.Close() + io.Copy(w, resp.Body) +} + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Example App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + + var err error + app, err = newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", index)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/version", versionHandler)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/notice_error", noticeError)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/custom_event", customEvent)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/set_name", setName)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/add_attribute", addAttribute)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/ignore", ignore)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/segments", segments)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/mysql", mysql)) + http.HandleFunc(newrelic.WrapHandleFunc(app, "/external", external)) + 
http.HandleFunc(newrelic.WrapHandleFunc(app, "/roundtripper", roundtripper)) + http.HandleFunc("/background", background) + + http.ListenAndServe(":8000", nil) +} diff --git a/vendor/github.com/newrelic/go-agent/examples/short-lived-process/main.go b/vendor/github.com/newrelic/go-agent/examples/short-lived-process/main.go new file mode 100644 index 00000000..ddf18802 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/examples/short-lived-process/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/newrelic/go-agent" +) + +func mustGetEnv(key string) string { + if val := os.Getenv(key); "" != val { + return val + } + panic(fmt.Sprintf("environment variable %s unset", key)) +} + +func main() { + cfg := newrelic.NewConfig("Short Lived App", mustGetEnv("NEW_RELIC_LICENSE_KEY")) + cfg.Logger = newrelic.NewDebugLogger(os.Stdout) + app, err := newrelic.NewApplication(cfg) + if nil != err { + fmt.Println(err) + os.Exit(1) + } + + // Wait for the application to connect. + if err := app.WaitForConnection(5 * time.Second); nil != err { + fmt.Println(err) + } + + // Do the tasks at hand. Perhaps record them using transactions and/or + // custom events. + tasks := []string{"white", "black", "red", "blue", "green", "yellow"} + for _, task := range tasks { + txn := app.StartTransaction("task", nil, nil) + time.Sleep(10 * time.Millisecond) + txn.End() + app.RecordCustomEvent("task", map[string]interface{}{ + "color": task, + }) + } + + // Shut down the application to flush data to New Relic. + app.Shutdown(10 * time.Second) +} diff --git a/vendor/github.com/newrelic/go-agent/instrumentation.go b/vendor/github.com/newrelic/go-agent/instrumentation.go new file mode 100644 index 00000000..12b0bf19 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/instrumentation.go @@ -0,0 +1,68 @@ +package newrelic + +import "net/http" + +// instrumentation.go contains helpers built on the lower level api. 
+ +// WrapHandle facilitates instrumentation of handlers registered with an +// http.ServeMux. For example, to instrument this code: +// +// http.Handle("/foo", fooHandler) +// +// Perform this replacement: +// +// http.Handle(newrelic.WrapHandle(app, "/foo", fooHandler)) +// +// The Transaction is passed to the handler in place of the original +// http.ResponseWriter, so it can be accessed using type assertion. +// For example, to rename the transaction: +// +// // 'w' is the variable name of the http.ResponseWriter. +// if txn, ok := w.(newrelic.Transaction); ok { +// txn.SetName("other-name") +// } +// +func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) { + return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + txn := app.StartTransaction(pattern, w, r) + defer txn.End() + + handler.ServeHTTP(txn, r) + }) +} + +// WrapHandleFunc serves the same purpose as WrapHandle for functions registered +// with ServeMux.HandleFunc. +func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) { + p, h := WrapHandle(app, pattern, http.HandlerFunc(handler)) + return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) } +} + +// NewRoundTripper creates an http.RoundTripper to instrument external requests. +// This RoundTripper must be used in same the goroutine as the other uses of the +// Transaction's SegmentTracer methods. http.DefaultTransport is used if an +// http.RoundTripper is not provided. 
+// +// client := &http.Client{} +// client.Transport = newrelic.NewRoundTripper(txn, nil) +// resp, err := client.Get("http://example.com/") +// +func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(request *http.Request) (*http.Response, error) { + segment := StartExternalSegment(txn, request) + + if nil == original { + original = http.DefaultTransport + } + response, err := original.RoundTrip(request) + + segment.Response = response + segment.End() + + return response, err + }) +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } diff --git a/vendor/github.com/newrelic/go-agent/internal/analytics_events.go b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go new file mode 100644 index 00000000..151766a3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go @@ -0,0 +1,122 @@ +package internal + +import ( + "bytes" + "container/heap" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// eventStamp allows for uniform random sampling of events. When an event is +// created it is given an eventStamp. Whenever an event pool is full and events +// need to be dropped, the events with the lowest stamps are dropped. 
+type eventStamp float32 + +func eventStampCmp(a, b eventStamp) bool { + return a < b +} + +type analyticsEvent struct { + stamp eventStamp + jsonWriter +} + +type analyticsEventHeap []analyticsEvent + +type analyticsEvents struct { + numSeen int + events analyticsEventHeap + failedHarvests int +} + +func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } +func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) } + +func (h analyticsEventHeap) Len() int { return len(h) } +func (h analyticsEventHeap) Less(i, j int) bool { return eventStampCmp(h[i].stamp, h[j].stamp) } +func (h analyticsEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h analyticsEventHeap) Push(x interface{}) {} +func (h analyticsEventHeap) Pop() interface{} { return nil } + +func newAnalyticsEvents(max int) *analyticsEvents { + return &analyticsEvents{ + numSeen: 0, + events: make(analyticsEventHeap, 0, max), + failedHarvests: 0, + } +} + +func (events *analyticsEvents) addEvent(e analyticsEvent) { + events.numSeen++ + + if len(events.events) < cap(events.events) { + events.events = append(events.events, e) + if len(events.events) == cap(events.events) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). 
+ heap.Init(events.events) + } + return + } + + if eventStampCmp(e.stamp, events.events[0].stamp) { + return + } + + events.events[0] = e + heap.Fix(events.events, 0) +} + +func (events *analyticsEvents) mergeFailed(other *analyticsEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +func (events *analyticsEvents) Merge(other *analyticsEvents) { + allSeen := events.numSeen + other.numSeen + + for _, e := range other.events { + events.addEvent(e) + } + events.numSeen = allSeen +} + +func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == events.numSeen { + return nil, nil + } + + estimate := 256 * len(events.events) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"reservoir_size":`) + jsonx.AppendUint(buf, uint64(cap(events.events))) + buf.WriteByte(',') + buf.WriteString(`"events_seen":`) + jsonx.AppendUint(buf, uint64(events.numSeen)) + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range events.events { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + + return buf.Bytes(), nil + +} diff --git a/vendor/github.com/newrelic/go-agent/internal/analytics_events_test.go b/vendor/github.com/newrelic/go-agent/internal/analytics_events_test.go new file mode 100644 index 00000000..a48754e4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/analytics_events_test.go @@ -0,0 +1,266 @@ +package internal + +import ( + "bytes" + "strconv" + "testing" + "time" +) + +var ( + agentRunID = `12345` +) + +type intWriter int + +func (x intWriter) WriteJSON(buf *bytes.Buffer) { + buf.WriteString(strconv.Itoa(int(x))) +} + +func sampleAnalyticsEvent(stamp int) analyticsEvent { + return analyticsEvent{ + eventStamp(stamp), + 
intWriter(stamp), + } +} + +func TestBasic(t *testing.T) { + events := newAnalyticsEvents(10) + events.addEvent(sampleAnalyticsEvent(1)) + events.addEvent(sampleAnalyticsEvent(1)) + events.addEvent(sampleAnalyticsEvent(1)) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + + expected := `["12345",{"reservoir_size":10,"events_seen":3},[1,1,1]]` + + if string(json) != expected { + t.Error(string(json), expected) + } + if 3 != events.numSeen { + t.Error(events.numSeen) + } + if 3 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestEmpty(t *testing.T) { + events := newAnalyticsEvents(10) + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != events.numSeen { + t.Error(events.numSeen) + } + if 0 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestSampling(t *testing.T) { + events := newAnalyticsEvents(3) + events.addEvent(sampleAnalyticsEvent(10)) + events.addEvent(sampleAnalyticsEvent(1)) + events.addEvent(sampleAnalyticsEvent(9)) + events.addEvent(sampleAnalyticsEvent(2)) + events.addEvent(sampleAnalyticsEvent(8)) + events.addEvent(sampleAnalyticsEvent(3)) + + json, err := events.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":3,"events_seen":6},[8,10,9]]` { + t.Error(string(json)) + } + if 6 != events.numSeen { + t.Error(events.numSeen) + } + if 3 != events.NumSaved() { + t.Error(events.NumSaved()) + } +} + +func TestMergeEmpty(t *testing.T) { + e1 := newAnalyticsEvents(10) + e2 := newAnalyticsEvents(10) + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if nil != json { + t.Error(string(json)) + } + if 0 != e1.numSeen { + t.Error(e1.numSeen) + } + if 0 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestMergeFull(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := 
newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(5)) + e1.addEvent(sampleAnalyticsEvent(10)) + e1.addEvent(sampleAnalyticsEvent(15)) + + e2.addEvent(sampleAnalyticsEvent(6)) + e2.addEvent(sampleAnalyticsEvent(12)) + e2.addEvent(sampleAnalyticsEvent(18)) + e2.addEvent(sampleAnalyticsEvent(24)) + + e1.Merge(e2) + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[18,24]]` { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } +} + +func TestAnalyticsEventMergeFailedSuccess(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(5)) + e1.addEvent(sampleAnalyticsEvent(10)) + e1.addEvent(sampleAnalyticsEvent(15)) + + e2.addEvent(sampleAnalyticsEvent(6)) + e2.addEvent(sampleAnalyticsEvent(12)) + e2.addEvent(sampleAnalyticsEvent(18)) + e2.addEvent(sampleAnalyticsEvent(24)) + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != `["12345",{"reservoir_size":2,"events_seen":7},[18,24]]` { + t.Error(string(json)) + } + if 7 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 1 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func TestAnalyticsEventMergeFailedLimitReached(t *testing.T) { + e1 := newAnalyticsEvents(2) + e2 := newAnalyticsEvents(3) + + e1.addEvent(sampleAnalyticsEvent(5)) + e1.addEvent(sampleAnalyticsEvent(10)) + e1.addEvent(sampleAnalyticsEvent(15)) + + e2.addEvent(sampleAnalyticsEvent(6)) + e2.addEvent(sampleAnalyticsEvent(12)) + e2.addEvent(sampleAnalyticsEvent(18)) + e2.addEvent(sampleAnalyticsEvent(24)) + + e2.failedHarvests = failedEventsAttemptsLimit + + e1.mergeFailed(e2) + + json, err := e1.CollectorJSON(agentRunID) + if nil != err { + t.Fatal(err) + } + if string(json) != 
`["12345",{"reservoir_size":2,"events_seen":3},[10,15]]` { + t.Error(string(json)) + } + if 3 != e1.numSeen { + t.Error(e1.numSeen) + } + if 2 != e1.NumSaved() { + t.Error(e1.NumSaved()) + } + if 0 != e1.failedHarvests { + t.Error(e1.failedHarvests) + } +} + +func analyticsEventBenchmarkHelper(b *testing.B, w jsonWriter) { + events := newAnalyticsEvents(maxTxnEvents) + event := analyticsEvent{eventStamp(1), w} + for n := 0; n < maxTxnEvents; n++ { + events.addEvent(event) + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := events.CollectorJSON(agentRunID) + if nil != err { + b.Fatal(err, js) + } + } +} + +func BenchmarkTxnEventsCollectorJSON(b *testing.B) { + event := &TxnEvent{ + FinalName: "WebTransaction/Go/zip/zap", + Start: time.Now(), + Duration: 2 * time.Second, + Queuing: 1 * time.Second, + Zone: ApdexSatisfying, + Attrs: nil, + } + analyticsEventBenchmarkHelper(b, event) +} + +func BenchmarkCustomEventsCollectorJSON(b *testing.B) { + now := time.Now() + ce, err := CreateCustomEvent("myEventType", map[string]interface{}{ + "string": "myString", + "bool": true, + "int64": int64(123), + "nil": nil, + }, now) + if nil != err { + b.Fatal(err) + } + analyticsEventBenchmarkHelper(b, ce) +} + +func BenchmarkErrorEventsCollectorJSON(b *testing.B) { + e := TxnErrorFromResponseCode(time.Now(), 503) + e.Stack = GetStackTrace(0) + + txnName := "WebTransaction/Go/zip/zap" + event := &ErrorEvent{ + ErrorData: e, + TxnEvent: TxnEvent{ + FinalName: txnName, + Duration: 3 * time.Second, + Attrs: nil, + }, + } + analyticsEventBenchmarkHelper(b, event) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/apdex.go b/vendor/github.com/newrelic/go-agent/internal/apdex.go new file mode 100644 index 00000000..28225f7d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/apdex.go @@ -0,0 +1,48 @@ +package internal + +import "time" + +// ApdexZone is a transaction classification. 
+type ApdexZone int + +// https://en.wikipedia.org/wiki/Apdex +const ( + ApdexNone ApdexZone = iota + ApdexSatisfying + ApdexTolerating + ApdexFailing +) + +// ApdexFailingThreshold calculates the threshold at which the transaction is +// considered a failure. +func ApdexFailingThreshold(threshold time.Duration) time.Duration { + return 4 * threshold +} + +// CalculateApdexZone calculates the apdex based on the transaction duration and +// threshold. +// +// Note that this does not take into account whether or not the transaction +// had an error. That is expected to be done by the caller. +func CalculateApdexZone(threshold, duration time.Duration) ApdexZone { + if duration <= threshold { + return ApdexSatisfying + } + if duration <= ApdexFailingThreshold(threshold) { + return ApdexTolerating + } + return ApdexFailing +} + +func (zone ApdexZone) label() string { + switch zone { + case ApdexSatisfying: + return "S" + case ApdexTolerating: + return "T" + case ApdexFailing: + return "F" + default: + return "" + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/apdex_test.go b/vendor/github.com/newrelic/go-agent/internal/apdex_test.go new file mode 100644 index 00000000..768b9537 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/apdex_test.go @@ -0,0 +1,46 @@ +package internal + +import ( + "testing" + "time" +) + +func dur(d int) time.Duration { + return time.Duration(d) +} + +func TestCalculateApdexZone(t *testing.T) { + if z := CalculateApdexZone(dur(10), dur(1)); z != ApdexSatisfying { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(10)); z != ApdexSatisfying { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(11)); z != ApdexTolerating { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(40)); z != ApdexTolerating { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(41)); z != ApdexFailing { + t.Fatal(z) + } + if z := CalculateApdexZone(dur(10), dur(100)); z != ApdexFailing { + t.Fatal(z) + } +} + 
+func TestApdexLabel(t *testing.T) { + if out := ApdexSatisfying.label(); "S" != out { + t.Fatal(out) + } + if out := ApdexTolerating.label(); "T" != out { + t.Fatal(out) + } + if out := ApdexFailing.label(); "F" != out { + t.Fatal(out) + } + if out := ApdexNone.label(); "" != out { + t.Fatal(out) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/attributes.go b/vendor/github.com/newrelic/go-agent/internal/attributes.go new file mode 100644 index 00000000..e0209472 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/attributes.go @@ -0,0 +1,548 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + "strings" +) + +// New agent attributes must be added in the following places: +// * Constants here. +// * Top level attributes.go file. +// * agentAttributes +// * agentAttributeDests +// * calculateAgentAttributeDests +// * writeAgentAttributes +const ( + responseCode = "httpResponseCode" + requestMethod = "request.method" + requestAccept = "request.headers.accept" + requestContentType = "request.headers.contentType" + requestContentLength = "request.headers.contentLength" + requestHost = "request.headers.host" + responseContentType = "response.headers.contentType" + responseContentLength = "response.headers.contentLength" + hostDisplayName = "host.displayName" + requestUserAgent = "request.headers.User-Agent" + requestReferer = "request.headers.referer" +) + +// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md + +// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to +// avoid circular dependency issues. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +type destinationSet int + +const ( + destTxnEvent destinationSet = 1 << iota + destError + destTxnTrace + destBrowser +) + +const ( + destNone destinationSet = 0 + // DestAll contains all destinations. 
+ DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser +) + +const ( + attributeWildcardSuffix = '*' +) + +type attributeModifier struct { + match string // This will not contain a trailing '*'. + includeExclude +} + +type byMatch []*attributeModifier + +func (m byMatch) Len() int { return len(m) } +func (m byMatch) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match } + +// AttributeConfig is created at application creation and shared between all +// transactions. +type AttributeConfig struct { + disabledDestinations destinationSet + exactMatchModifiers map[string]*attributeModifier + // Once attributeConfig is constructed, wildcardModifiers is sorted in + // lexicographical order. Modifiers appearing later have precedence + // over modifiers appearing earlier. + wildcardModifiers []*attributeModifier + agentDests agentAttributeDests +} + +type includeExclude struct { + include destinationSet + exclude destinationSet +} + +func modifierApply(m *attributeModifier, d destinationSet) destinationSet { + // Include before exclude, since exclude has priority. + d |= m.include + d &^= m.exclude + return d +} + +func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet { + // Important: The wildcard modifiers must be applied before the exact + // match modifiers, and the slice must be iterated in a forward + // direction. 
+ for _, m := range c.wildcardModifiers { + if strings.HasPrefix(key, m.match) { + d = modifierApply(m, d) + } + } + + if m, ok := c.exactMatchModifiers[key]; ok { + d = modifierApply(m, d) + } + + d &^= c.disabledDestinations + + return d +} + +func addModifier(c *AttributeConfig, match string, d includeExclude) { + if "" == match { + return + } + exactMatch := true + if attributeWildcardSuffix == match[len(match)-1] { + exactMatch = false + match = match[0 : len(match)-1] + } + mod := &attributeModifier{ + match: match, + includeExclude: d, + } + + if exactMatch { + if m, ok := c.exactMatchModifiers[mod.match]; ok { + m.include |= mod.include + m.exclude |= mod.exclude + } else { + c.exactMatchModifiers[mod.match] = mod + } + } else { + for _, m := range c.wildcardModifiers { + // Important: Duplicate entries for the same match + // string would not work because exclude needs + // precedence over include. + if m.match == mod.match { + m.include |= mod.include + m.exclude |= mod.exclude + return + } + } + c.wildcardModifiers = append(c.wildcardModifiers, mod) + } +} + +func processDest(c *AttributeConfig, dc *AttributeDestinationConfig, d destinationSet) { + if !dc.Enabled { + c.disabledDestinations |= d + } + for _, match := range dc.Include { + addModifier(c, match, includeExclude{include: d}) + } + for _, match := range dc.Exclude { + addModifier(c, match, includeExclude{exclude: d}) + } +} + +// AttributeConfigInput is used as the input to CreateAttributeConfig: it +// transforms newrelic.Config settings into an AttributeConfig. 
+type AttributeConfigInput struct { + Attributes AttributeDestinationConfig + ErrorCollector AttributeDestinationConfig + TransactionEvents AttributeDestinationConfig + browserMonitoring AttributeDestinationConfig + TransactionTracer AttributeDestinationConfig +} + +var ( + sampleAttributeConfigInput = AttributeConfigInput{ + Attributes: AttributeDestinationConfig{Enabled: true}, + ErrorCollector: AttributeDestinationConfig{Enabled: true}, + TransactionEvents: AttributeDestinationConfig{Enabled: true}, + TransactionTracer: AttributeDestinationConfig{Enabled: true}, + } +) + +// CreateAttributeConfig creates a new AttributeConfig. +func CreateAttributeConfig(input AttributeConfigInput) *AttributeConfig { + c := &AttributeConfig{ + exactMatchModifiers: make(map[string]*attributeModifier), + wildcardModifiers: make([]*attributeModifier, 0, 64), + } + + processDest(c, &input.Attributes, DestAll) + processDest(c, &input.ErrorCollector, destError) + processDest(c, &input.TransactionEvents, destTxnEvent) + processDest(c, &input.TransactionTracer, destTxnTrace) + processDest(c, &input.browserMonitoring, destBrowser) + + sort.Sort(byMatch(c.wildcardModifiers)) + + c.agentDests = calculateAgentAttributeDests(c) + + return c +} + +type userAttribute struct { + value interface{} + dests destinationSet +} + +// Attributes are key value pairs attached to the various collected data types. 
+type Attributes struct { + config *AttributeConfig + user map[string]userAttribute + Agent agentAttributes +} + +type agentAttributes struct { + HostDisplayName string + RequestMethod string + RequestAcceptHeader string + RequestContentType string + RequestContentLength int + RequestHeadersHost string + RequestHeadersUserAgent string + RequestHeadersReferer string + ResponseHeadersContentType string + ResponseHeadersContentLength int + ResponseCode string +} + +type agentAttributeDests struct { + HostDisplayName destinationSet + RequestMethod destinationSet + RequestAcceptHeader destinationSet + RequestContentType destinationSet + RequestContentLength destinationSet + RequestHeadersHost destinationSet + RequestHeadersUserAgent destinationSet + RequestHeadersReferer destinationSet + ResponseHeadersContentType destinationSet + ResponseHeadersContentLength destinationSet + ResponseCode destinationSet +} + +func calculateAgentAttributeDests(c *AttributeConfig) agentAttributeDests { + usual := DestAll &^ destBrowser + traces := destTxnTrace | destError + return agentAttributeDests{ + HostDisplayName: applyAttributeConfig(c, hostDisplayName, usual), + RequestMethod: applyAttributeConfig(c, requestMethod, usual), + RequestAcceptHeader: applyAttributeConfig(c, requestAccept, usual), + RequestContentType: applyAttributeConfig(c, requestContentType, usual), + RequestContentLength: applyAttributeConfig(c, requestContentLength, usual), + RequestHeadersHost: applyAttributeConfig(c, requestHost, usual), + RequestHeadersUserAgent: applyAttributeConfig(c, requestUserAgent, traces), + RequestHeadersReferer: applyAttributeConfig(c, requestReferer, traces), + ResponseHeadersContentType: applyAttributeConfig(c, responseContentType, usual), + ResponseHeadersContentLength: applyAttributeConfig(c, responseContentLength, usual), + ResponseCode: applyAttributeConfig(c, responseCode, usual), + } +} + +type agentAttributeWriter struct { + jsonFieldsWriter + d destinationSet +} + +func (w 
*agentAttributeWriter) writeString(name string, val string, d destinationSet) { + if "" != val && 0 != w.d&d { + w.stringField(name, truncateStringValueIfLong(val)) + } +} + +func (w *agentAttributeWriter) writeInt(name string, val int, d destinationSet) { + if val >= 0 && 0 != w.d&d { + w.intField(name, int64(val)) + } +} + +func writeAgentAttributes(buf *bytes.Buffer, d destinationSet, values agentAttributes, dests agentAttributeDests) { + w := &agentAttributeWriter{ + jsonFieldsWriter: jsonFieldsWriter{buf: buf}, + d: d, + } + buf.WriteByte('{') + w.writeString(hostDisplayName, values.HostDisplayName, dests.HostDisplayName) + w.writeString(requestMethod, values.RequestMethod, dests.RequestMethod) + w.writeString(requestAccept, values.RequestAcceptHeader, dests.RequestAcceptHeader) + w.writeString(requestContentType, values.RequestContentType, dests.RequestContentType) + w.writeInt(requestContentLength, values.RequestContentLength, dests.RequestContentLength) + w.writeString(requestHost, values.RequestHeadersHost, dests.RequestHeadersHost) + w.writeString(requestUserAgent, values.RequestHeadersUserAgent, dests.RequestHeadersUserAgent) + w.writeString(requestReferer, values.RequestHeadersReferer, dests.RequestHeadersReferer) + w.writeString(responseContentType, values.ResponseHeadersContentType, dests.ResponseHeadersContentType) + w.writeInt(responseContentLength, values.ResponseHeadersContentLength, dests.ResponseHeadersContentLength) + w.writeString(responseCode, values.ResponseCode, dests.ResponseCode) + buf.WriteByte('}') +} + +// NewAttributes creates a new Attributes. +func NewAttributes(config *AttributeConfig) *Attributes { + return &Attributes{ + config: config, + Agent: agentAttributes{ + RequestContentLength: -1, + ResponseHeadersContentLength: -1, + }, + } +} + +// ErrInvalidAttribute is returned when the value is not valid. 
+type ErrInvalidAttribute struct{ typeString string } + +func (e ErrInvalidAttribute) Error() string { + return fmt.Sprintf("attribute value type %s is invalid", e.typeString) +} + +func valueIsValid(val interface{}) error { + switch val.(type) { + case string, bool, nil, + uint8, uint16, uint32, uint64, int8, int16, int32, int64, + float32, float64, uint, int, uintptr: + return nil + default: + return ErrInvalidAttribute{ + typeString: fmt.Sprintf("%T", val), + } + } +} + +type invalidAttributeKeyErr struct{ key string } + +func (e invalidAttributeKeyErr) Error() string { + return fmt.Sprintf("attribute key '%.32s...' exceeds length limit %d", + e.key, attributeKeyLengthLimit) +} + +type userAttributeLimitErr struct{ key string } + +func (e userAttributeLimitErr) Error() string { + return fmt.Sprintf("attribute '%s' discarded: limit of %d reached", e.key, + attributeUserLimit) +} + +func validAttributeKey(key string) error { + // Attributes whose keys are excessively long are dropped rather than + // truncated to avoid worrying about the application of configuration to + // truncated values or performing the truncation after configuration. + if len(key) > attributeKeyLengthLimit { + return invalidAttributeKeyErr{key: key} + } + return nil +} + +func truncateStringValueIfLong(val string) string { + if len(val) > attributeValueLengthLimit { + return StringLengthByteLimit(val, attributeValueLengthLimit) + } + return val +} + +func truncateStringValueIfLongInterface(val interface{}) interface{} { + if str, ok := val.(string); ok { + val = interface{}(truncateStringValueIfLong(str)) + } + return val +} + +// AddUserAttribute adds a user attribute. 
+func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error { + val = truncateStringValueIfLongInterface(val) + if err := valueIsValid(val); nil != err { + return err + } + if err := validAttributeKey(key); nil != err { + return err + } + dests := applyAttributeConfig(a.config, key, d) + if destNone == dests { + return nil + } + if nil == a.user { + a.user = make(map[string]userAttribute) + } + + if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit { + return userAttributeLimitErr{key} + } + + // Note: Duplicates are overridden: last attribute in wins. + a.user[key] = userAttribute{ + value: val, + dests: dests, + } + return nil +} + +func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) { + switch v := val.(type) { + case nil: + w.rawField(key, `null`) + case string: + w.stringField(key, v) + case bool: + if v { + w.rawField(key, `true`) + } else { + w.rawField(key, `false`) + } + case uint8: + w.intField(key, int64(v)) + case uint16: + w.intField(key, int64(v)) + case uint32: + w.intField(key, int64(v)) + case uint64: + w.intField(key, int64(v)) + case uint: + w.intField(key, int64(v)) + case uintptr: + w.intField(key, int64(v)) + case int8: + w.intField(key, int64(v)) + case int16: + w.intField(key, int64(v)) + case int32: + w.intField(key, int64(v)) + case int64: + w.intField(key, v) + case int: + w.intField(key, int64(v)) + case float32: + w.floatField(key, float64(v)) + case float64: + w.floatField(key, v) + default: + w.stringField(key, fmt.Sprintf("%T", v)) + } +} + +func agentAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) { + if nil == a { + buf.WriteString("{}") + return + } + writeAgentAttributes(buf, d, a.Agent, a.config.agentDests) +} + +func userAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) { + buf.WriteByte('{') + if nil != a { + w := jsonFieldsWriter{buf: buf} + for name, atr := range a.user { + if 0 != atr.dests&d { + 
writeAttributeValueJSON(&w, name, atr.value) + } + } + } + buf.WriteByte('}') +} + +func userAttributesStringJSON(a *Attributes, d destinationSet) JSONString { + if nil == a { + return JSONString("{}") + } + estimate := len(a.user) * 128 + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + userAttributesJSON(a, buf, d) + bs := buf.Bytes() + return JSONString(bs) +} + +func agentAttributesStringJSON(a *Attributes, d destinationSet) JSONString { + if nil == a { + return JSONString("{}") + } + estimate := 1024 + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + agentAttributesJSON(a, buf, d) + return JSONString(buf.Bytes()) +} + +func getUserAttributes(a *Attributes, d destinationSet) map[string]interface{} { + v := make(map[string]interface{}) + json.Unmarshal([]byte(userAttributesStringJSON(a, d)), &v) + return v +} + +func getAgentAttributes(a *Attributes, d destinationSet) map[string]interface{} { + v := make(map[string]interface{}) + json.Unmarshal([]byte(agentAttributesStringJSON(a, d)), &v) + return v +} + +// RequestAgentAttributes gathers agent attributes out of the request. +func RequestAgentAttributes(a *Attributes, r *http.Request) { + a.Agent.RequestMethod = r.Method + + h := r.Header + if nil == h { + return + } + a.Agent.RequestAcceptHeader = h.Get("Accept") + a.Agent.RequestContentType = h.Get("Content-Type") + a.Agent.RequestHeadersHost = h.Get("Host") + a.Agent.RequestHeadersUserAgent = h.Get("User-Agent") + a.Agent.RequestHeadersReferer = SafeURLFromString(h.Get("Referer")) + + if cl := h.Get("Content-Length"); "" != cl { + if x, err := strconv.Atoi(cl); nil == err { + a.Agent.RequestContentLength = x + } + } +} + +// ResponseHeaderAttributes gather agent attributes from the response headers. 
+func ResponseHeaderAttributes(a *Attributes, h http.Header) { + if nil == h { + return + } + a.Agent.ResponseHeadersContentType = h.Get("Content-Type") + if val := h.Get("Content-Length"); "" != val { + if x, err := strconv.Atoi(val); nil == err { + a.Agent.ResponseHeadersContentLength = x + } + } +} + +var ( + // statusCodeLookup avoids a strconv.Itoa call. + statusCodeLookup = map[int]string{ + 100: "100", 101: "101", + 200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206", + 300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307", + 400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406", + 407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413", + 414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429", + 431: "431", 451: "451", + 500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511", + } +) + +// ResponseCodeAttribute sets the response code agent attribute. 
+func ResponseCodeAttribute(a *Attributes, code int) { + a.Agent.ResponseCode = statusCodeLookup[code] + if a.Agent.ResponseCode == "" { + a.Agent.ResponseCode = strconv.Itoa(code) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/attributes_test.go b/vendor/github.com/newrelic/go-agent/internal/attributes_test.go new file mode 100644 index 00000000..8bc84489 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/attributes_test.go @@ -0,0 +1,270 @@ +package internal + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +type AttributeTestcase struct { + Testname string `json:"testname"` + Config struct { + AttributesEnabled bool `json:"attributes.enabled"` + AttributesInclude []string `json:"attributes.include"` + AttributesExclude []string `json:"attributes.exclude"` + BrowserAttributesEnabled bool `json:"browser_monitoring.attributes.enabled"` + BrowserAttributesInclude []string `json:"browser_monitoring.attributes.include"` + BrowserAttributesExclude []string `json:"browser_monitoring.attributes.exclude"` + ErrorAttributesEnabled bool `json:"error_collector.attributes.enabled"` + ErrorAttributesInclude []string `json:"error_collector.attributes.include"` + ErrorAttributesExclude []string `json:"error_collector.attributes.exclude"` + EventsAttributesEnabled bool `json:"transaction_events.attributes.enabled"` + EventsAttributesInclude []string `json:"transaction_events.attributes.include"` + EventsAttributesExclude []string `json:"transaction_events.attributes.exclude"` + TracerAttributesEnabled bool `json:"transaction_tracer.attributes.enabled"` + TracerAttributesInclude []string `json:"transaction_tracer.attributes.include"` + TracerAttributesExclude []string `json:"transaction_tracer.attributes.exclude"` + } `json:"config"` + Key string `json:"input_key"` + InputDestinations []string `json:"input_default_destinations"` + ExpectedDestinations []string 
`json:"expected_destinations"` +} + +var ( + destTranslate = map[string]destinationSet{ + "attributes": DestAll, + "transaction_events": destTxnEvent, + "transaction_tracer": destTxnTrace, + "error_collector": destError, + "browser_monitoring": destBrowser, + } +) + +func destinationsFromArray(dests []string) destinationSet { + d := destNone + for _, s := range dests { + if x, ok := destTranslate[s]; ok { + d |= x + } + } + return d +} + +func destToString(d destinationSet) string { + if 0 == d { + return "none" + } + out := "" + for _, ds := range []struct { + Name string + Dest destinationSet + }{ + {Name: "event", Dest: destTxnEvent}, + {Name: "trace", Dest: destTxnTrace}, + {Name: "error", Dest: destError}, + {Name: "browser", Dest: destBrowser}, + } { + if 0 != d&ds.Dest { + if "" == out { + out = ds.Name + } else { + out = out + "," + ds.Name + } + } + } + return out +} + +func runAttributeTestcase(t *testing.T, js json.RawMessage) { + var tc AttributeTestcase + + tc.Config.AttributesEnabled = true + tc.Config.BrowserAttributesEnabled = false + tc.Config.ErrorAttributesEnabled = true + tc.Config.EventsAttributesEnabled = true + tc.Config.TracerAttributesEnabled = true + + if err := json.Unmarshal(js, &tc); nil != err { + t.Error(err) + return + } + + input := AttributeConfigInput{ + Attributes: AttributeDestinationConfig{ + Enabled: tc.Config.AttributesEnabled, + Include: tc.Config.AttributesInclude, + Exclude: tc.Config.AttributesExclude, + }, + ErrorCollector: AttributeDestinationConfig{ + Enabled: tc.Config.ErrorAttributesEnabled, + Include: tc.Config.ErrorAttributesInclude, + Exclude: tc.Config.ErrorAttributesExclude, + }, + TransactionEvents: AttributeDestinationConfig{ + Enabled: tc.Config.EventsAttributesEnabled, + Include: tc.Config.EventsAttributesInclude, + Exclude: tc.Config.EventsAttributesExclude, + }, + browserMonitoring: AttributeDestinationConfig{ + Enabled: tc.Config.BrowserAttributesEnabled, + Include: tc.Config.BrowserAttributesInclude, + 
Exclude: tc.Config.BrowserAttributesExclude, + }, + TransactionTracer: AttributeDestinationConfig{ + Enabled: tc.Config.TracerAttributesEnabled, + Include: tc.Config.TracerAttributesInclude, + Exclude: tc.Config.TracerAttributesExclude, + }, + } + + cfg := CreateAttributeConfig(input) + + inputDests := destinationsFromArray(tc.InputDestinations) + expectedDests := destinationsFromArray(tc.ExpectedDestinations) + + out := applyAttributeConfig(cfg, tc.Key, inputDests) + + if out != expectedDests { + t.Error(tc.Testname, destToString(expectedDests), + destToString(out)) + } +} + +func TestCrossAgentAttributes(t *testing.T) { + var tcs []json.RawMessage + + err := crossagent.ReadJSON("attribute_configuration.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + runAttributeTestcase(t, tc) + } +} + +func TestWriteAttributeValueJSON(t *testing.T) { + buf := &bytes.Buffer{} + w := jsonFieldsWriter{buf: buf} + + buf.WriteByte('{') + writeAttributeValueJSON(&w, "a", nil) + writeAttributeValueJSON(&w, "a", `escape\me!`) + writeAttributeValueJSON(&w, "a", true) + writeAttributeValueJSON(&w, "a", false) + writeAttributeValueJSON(&w, "a", uint8(1)) + writeAttributeValueJSON(&w, "a", uint16(2)) + writeAttributeValueJSON(&w, "a", uint32(3)) + writeAttributeValueJSON(&w, "a", uint64(4)) + writeAttributeValueJSON(&w, "a", uint(5)) + writeAttributeValueJSON(&w, "a", uintptr(6)) + writeAttributeValueJSON(&w, "a", int8(-1)) + writeAttributeValueJSON(&w, "a", int16(-2)) + writeAttributeValueJSON(&w, "a", int32(-3)) + writeAttributeValueJSON(&w, "a", int64(-4)) + writeAttributeValueJSON(&w, "a", int(-5)) + writeAttributeValueJSON(&w, "a", float32(1.5)) + writeAttributeValueJSON(&w, "a", float64(4.56)) + buf.WriteByte('}') + + expect := CompactJSONString(`{ + "a":null, + "a":"escape\\me!", + "a":true, + "a":false, + "a":1, + "a":2, + "a":3, + "a":4, + "a":5, + "a":6, + "a":-1, + "a":-2, + "a":-3, + "a":-4, + "a":-5, + "a":1.5, + "a":4.56 + }`) + js := 
string(buf.Bytes()) + if js != expect { + t.Error(js, expect) + } +} + +func TestUserAttributeValLength(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput) + attrs := NewAttributes(cfg) + + atLimit := strings.Repeat("a", attributeValueLengthLimit) + tooLong := atLimit + "a" + + err := AddUserAttribute(attrs, `escape\me`, tooLong, DestAll) + if err != nil { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll) + if `{"escape\\me":"`+atLimit+`"}` != string(js) { + t.Error(js) + } +} + +func TestUserAttributeKeyLength(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput) + attrs := NewAttributes(cfg) + + lengthyKey := strings.Repeat("a", attributeKeyLengthLimit+1) + err := AddUserAttribute(attrs, lengthyKey, 123, DestAll) + if _, ok := err.(invalidAttributeKeyErr); !ok { + t.Error(err) + } + js := userAttributesStringJSON(attrs, DestAll) + if `{}` != string(js) { + t.Error(js) + } +} + +func TestNumUserAttributesLimit(t *testing.T) { + cfg := CreateAttributeConfig(sampleAttributeConfigInput) + attrs := NewAttributes(cfg) + + for i := 0; i < attributeUserLimit; i++ { + s := strconv.Itoa(i) + err := AddUserAttribute(attrs, s, s, DestAll) + if err != nil { + t.Fatal(err) + } + } + + err := AddUserAttribute(attrs, "cant_add_me", 123, DestAll) + if _, ok := err.(userAttributeLimitErr); !ok { + t.Fatal(err) + } + + js := userAttributesStringJSON(attrs, DestAll) + var out map[string]string + err = json.Unmarshal([]byte(js), &out) + if nil != err { + t.Fatal(err) + } + if len(out) != attributeUserLimit { + t.Error(len(out)) + } + if strings.Contains(string(js), "cant_add_me") { + t.Fatal(js) + } + + // Now test that replacement works when the limit is reached. 
+ err = AddUserAttribute(attrs, "0", "BEEN_REPLACED", DestAll) + if nil != err { + t.Fatal(err) + } + js = userAttributesStringJSON(attrs, DestAll) + if !strings.Contains(string(js), "BEEN_REPLACED") { + t.Fatal(js) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/collector.go b/vendor/github.com/newrelic/go-agent/internal/collector.go new file mode 100644 index 00000000..5f30fefe --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/collector.go @@ -0,0 +1,274 @@ +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + procotolVersion = "14" + userAgentPrefix = "NewRelic-Go-Agent/" + + // Methods used in collector communication. + cmdRedirect = "get_redirect_host" + cmdConnect = "connect" + cmdMetrics = "metric_data" + cmdCustomEvents = "custom_event_data" + cmdTxnEvents = "analytic_event_data" + cmdErrorEvents = "error_event_data" + cmdErrorData = "error_data" + cmdTxnTraces = "transaction_sample_data" + cmdSlowSQLs = "sql_trace_data" +) + +var ( + // ErrPayloadTooLarge is created in response to receiving a 413 response + // code. + ErrPayloadTooLarge = errors.New("payload too large") + // ErrUnauthorized is created in response to receiving a 401 response code. + ErrUnauthorized = errors.New("unauthorized") + // ErrUnsupportedMedia is created in response to receiving a 415 + // response code. + ErrUnsupportedMedia = errors.New("unsupported media") +) + +// RpmCmd contains fields specific to an individual call made to RPM. +type RpmCmd struct { + Name string + Collector string + RunID string + Data []byte +} + +// RpmControls contains fields which will be the same for all calls made +// by the same application. 
+type RpmControls struct { + UseTLS bool + License string + Client *http.Client + Logger logger.Logger + AgentVersion string +} + +func rpmURL(cmd RpmCmd, cs RpmControls) string { + var u url.URL + + u.Host = cmd.Collector + u.Path = "agent_listener/invoke_raw_method" + + if cs.UseTLS { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + + query := url.Values{} + query.Set("marshal_format", "json") + query.Set("protocol_version", procotolVersion) + query.Set("method", cmd.Name) + query.Set("license_key", cs.License) + + if len(cmd.RunID) > 0 { + query.Set("run_id", cmd.RunID) + } + + u.RawQuery = query.Encode() + return u.String() +} + +type unexpectedStatusCodeErr struct { + code int +} + +func (e unexpectedStatusCodeErr) Error() string { + return fmt.Sprintf("unexpected HTTP status code: %d", e.code) +} + +func collectorRequestInternal(url string, data []byte, cs RpmControls) ([]byte, error) { + deflated, err := compress(data) + if nil != err { + return nil, err + } + + req, err := http.NewRequest("POST", url, deflated) + if nil != err { + return nil, err + } + + req.Header.Add("Accept-Encoding", "identity, deflate") + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion) + req.Header.Add("Content-Encoding", "deflate") + + resp, err := cs.Client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + switch resp.StatusCode { + case 200: + // Nothing to do. + case 401: + return nil, ErrUnauthorized + case 413: + return nil, ErrPayloadTooLarge + case 415: + return nil, ErrUnsupportedMedia + default: + // If the response code is not 200, then the collector may not return + // valid JSON. 
+ return nil, unexpectedStatusCodeErr{code: resp.StatusCode} + } + + // Read the entire response, rather than using resp.Body as input to json.NewDecoder to + // avoid the issue described here: + // https://github.com/google/go-github/pull/317 + // https://ahmetalpbalkan.com/blog/golang-json-decoder-pitfalls/ + // Also, collector JSON responses are expected to be quite small. + b, err := ioutil.ReadAll(resp.Body) + if nil != err { + return nil, err + } + return parseResponse(b) +} + +// CollectorRequest makes a request to New Relic. +func CollectorRequest(cmd RpmCmd, cs RpmControls) ([]byte, error) { + url := rpmURL(cmd, cs) + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm request", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "payload": JSONString(cmd.Data), + }) + } + + resp, err := collectorRequestInternal(url, cmd.Data, cs) + if err != nil { + cs.Logger.Debug("rpm failure", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "error": err.Error(), + }) + } + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm response", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": JSONString(resp), + }) + } + + return resp, err +} + +type rpmException struct { + Message string `json:"message"` + ErrorType string `json:"error_type"` +} + +func (e *rpmException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorType, e.Message) +} + +func hasType(e error, expected string) bool { + rpmErr, ok := e.(*rpmException) + if !ok { + return false + } + return rpmErr.ErrorType == expected + +} + +const ( + forceRestartType = "NewRelic::Agent::ForceRestartException" + disconnectType = "NewRelic::Agent::ForceDisconnectException" + licenseInvalidType = "NewRelic::Agent::LicenseException" + runtimeType = "RuntimeError" +) + +// IsRestartException indicates if the error was a restart exception. 
+func IsRestartException(e error) bool { return hasType(e, forceRestartType) } + +// IsLicenseException indicates if the error was an invalid exception. +func IsLicenseException(e error) bool { return hasType(e, licenseInvalidType) } + +// IsRuntime indicates if the error was a runtime exception. +func IsRuntime(e error) bool { return hasType(e, runtimeType) } + +// IsDisconnect indicates if the error was a disconnect exception. +func IsDisconnect(e error) bool { return hasType(e, disconnectType) } + +func parseResponse(b []byte) ([]byte, error) { + var r struct { + ReturnValue json.RawMessage `json:"return_value"` + Exception *rpmException `json:"exception"` + } + + err := json.Unmarshal(b, &r) + if nil != err { + return nil, err + } + + if nil != r.Exception { + return nil, r.Exception + } + + return r.ReturnValue, nil +} + +// ConnectAttempt tries to connect an application. +func ConnectAttempt(js []byte, redirectHost string, cs RpmControls) (*AppRun, error) { + call := RpmCmd{ + Name: cmdRedirect, + Collector: redirectHost, + Data: []byte("[]"), + } + + out, err := CollectorRequest(call, cs) + if nil != err { + // err is intentionally unmodified: We do not want to change + // the type of these collector errors. + return nil, err + } + + var host string + err = json.Unmarshal(out, &host) + if nil != err { + return nil, fmt.Errorf("unable to parse redirect reply: %v", err) + } + + call.Collector = host + call.Data = js + call.Name = cmdConnect + + rawReply, err := CollectorRequest(call, cs) + if nil != err { + // err is intentionally unmodified: We do not want to change + // the type of these collector errors. + return nil, err + } + + reply := ConnectReplyDefaults() + err = json.Unmarshal(rawReply, reply) + if nil != err { + return nil, fmt.Errorf("unable to parse connect reply: %v", err) + } + // Note: This should never happen. It would mean the collector + // response is malformed. This exists merely as extra defensiveness. 
+ if "" == reply.RunID { + return nil, errors.New("connect reply missing agent run id") + } + + return &AppRun{reply, host}, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/collector_test.go b/vendor/github.com/newrelic/go-agent/internal/collector_test.go new file mode 100644 index 00000000..85f00d2e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/collector_test.go @@ -0,0 +1,446 @@ +package internal + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/logger" +) + +func TestLicenseInvalid(t *testing.T) { + r := CompactJSONString(`{ + "exception":{ + "message":"Invalid license key, please contact support@newrelic.com", + "error_type":"NewRelic::Agent::LicenseException" + } + }`) + reply, err := parseResponse([]byte(r)) + if reply != nil { + t.Fatal(string(reply)) + } + if !IsLicenseException(err) { + t.Fatal(err) + } +} + +func TestRedirectSuccess(t *testing.T) { + r := `{"return_value":"staging-collector-101.newrelic.com"}` + reply, err := parseResponse([]byte(r)) + if nil != err { + t.Fatal(err) + } + if string(reply) != `"staging-collector-101.newrelic.com"` { + t.Fatal(string(reply)) + } +} + +func TestEmptyHash(t *testing.T) { + reply, err := parseResponse([]byte(`{}`)) + if nil != err { + t.Fatal(err) + } + if nil != reply { + t.Fatal(string(reply)) + } +} + +func TestReturnValueNull(t *testing.T) { + reply, err := parseResponse([]byte(`{"return_value":null}`)) + if nil != err { + t.Fatal(err) + } + if "null" != string(reply) { + t.Fatal(string(reply)) + } +} + +func TestReplyNull(t *testing.T) { + reply, err := parseResponse(nil) + + if nil == err || err.Error() != `unexpected end of JSON input` { + t.Fatal(err) + } + if nil != reply { + t.Fatal(string(reply)) + } +} + +func TestConnectSuccess(t *testing.T) { + inner := `{ + "agent_run_id":"599551769342729", + "product_level":40, + "js_agent_file":"", + "cross_process_id":"12345#12345", + 
"collect_errors":true, + "url_rules":[ + { + "each_segment":false, + "match_expression":".*\\.(txt|udl|plist|css)$", + "eval_order":1000, + "replace_all":false, + "ignore":false, + "terminate_chain":true, + "replacement":"\/*.\\1" + }, + { + "each_segment":true, + "match_expression":"^[0-9][0-9a-f_,.-]*$", + "eval_order":1001, + "replace_all":false, + "ignore":false, + "terminate_chain":false, + "replacement":"*" + } + ], + "messages":[ + { + "message":"Reporting to staging", + "level":"INFO" + } + ], + "data_report_period":60, + "collect_traces":true, + "sampling_rate":0, + "js_agent_loader":"", + "encoding_key":"the-encoding-key", + "apdex_t":0.5, + "collect_analytics_events":true, + "trusted_account_ids":[49402] +}` + outer := `{"return_value":` + inner + `}` + reply, err := parseResponse([]byte(outer)) + + if nil != err { + t.Fatal(err) + } + if string(reply) != inner { + t.Fatal(string(reply)) + } +} + +func TestClientError(t *testing.T) { + r := `{"exception":{"message":"something","error_type":"my_error"}}` + reply, err := parseResponse([]byte(r)) + if nil == err || err.Error() != "my_error: something" { + t.Fatal(err) + } + if nil != reply { + t.Fatal(string(reply)) + } +} + +func TestForceRestartException(t *testing.T) { + // NOTE: This string was generated manually, not taken from the actual + // collector. + r := CompactJSONString(`{ + "exception":{ + "message":"something", + "error_type":"NewRelic::Agent::ForceRestartException" + } + }`) + reply, err := parseResponse([]byte(r)) + if reply != nil { + t.Fatal(string(reply)) + } + if !IsRestartException(err) { + t.Fatal(err) + } +} + +func TestForceDisconnectException(t *testing.T) { + // NOTE: This string was generated manually, not taken from the actual + // collector. 
+ r := CompactJSONString(`{ + "exception":{ + "message":"something", + "error_type":"NewRelic::Agent::ForceDisconnectException" + } + }`) + reply, err := parseResponse([]byte(r)) + if reply != nil { + t.Fatal(string(reply)) + } + if !IsDisconnect(err) { + t.Fatal(err) + } +} + +func TestRuntimeError(t *testing.T) { + // NOTE: This string was generated manually, not taken from the actual + // collector. + r := `{"exception":{"message":"something","error_type":"RuntimeError"}}` + reply, err := parseResponse([]byte(r)) + if reply != nil { + t.Fatal(string(reply)) + } + if !IsRuntime(err) { + t.Fatal(err) + } +} + +func TestUnknownError(t *testing.T) { + r := `{"exception":{"message":"something","error_type":"unknown_type"}}` + reply, err := parseResponse([]byte(r)) + if reply != nil { + t.Fatal(string(reply)) + } + if nil == err || err.Error() != "unknown_type: something" { + t.Fatal(err) + } +} + +func TestUrl(t *testing.T) { + cmd := RpmCmd{ + Name: "foo_method", + Collector: "example.com", + } + cs := RpmControls{ + UseTLS: true, + License: "123abc", + Client: nil, + Logger: nil, + AgentVersion: "1", + } + + out := rpmURL(cmd, cs) + u, err := url.Parse(out) + if err != nil { + t.Fatalf("url.Parse(%q) = %q", out, err) + } + + got := u.Query().Get("license_key") + if got != cs.License { + t.Errorf("got=%q cmd.License=%q", got, cs.License) + } + if u.Scheme != "https" { + t.Error(u.Scheme) + } +} + +const ( + redirectBody = `{"return_value":"special_collector"}` + connectBody = `{"return_value":{"agent_run_id":"my_agent_run_id"}}` + disconnectBody = `{"exception":{"error_type":"NewRelic::Agent::ForceDisconnectException"}}` + licenseBody = `{"exception":{"error_type":"NewRelic::Agent::LicenseException"}}` + malformedBody = `{"return_value":}}` +) + +func makeResponse(code int, body string) *http.Response { + return &http.Response{ + StatusCode: code, + Body: ioutil.NopCloser(strings.NewReader(body)), + } +} + +type endpointResult struct { + response *http.Response + 
err error +} + +type connectMockRoundTripper struct { + redirect endpointResult + connect endpointResult +} + +func (m connectMockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + cmd := r.URL.Query().Get("method") + switch cmd { + case cmdRedirect: + return m.redirect.response, m.redirect.err + case cmdConnect: + return m.connect.response, m.connect.err + default: + return nil, fmt.Errorf("unknown cmd: %s", cmd) + } +} + +func (m connectMockRoundTripper) CancelRequest(req *http.Request) {} + +func testConnectHelper(transport http.RoundTripper) (*AppRun, error) { + cs := RpmControls{ + UseTLS: true, + License: "12345", + Client: &http.Client{Transport: transport}, + Logger: logger.ShimLogger{}, + AgentVersion: "1", + } + + return ConnectAttempt([]byte(`"connect-json"`), "redirect-host", cs) +} + +func TestConnectAttemptSuccess(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil == run || nil != err { + t.Fatal(run, err) + } + if run.Collector != "special_collector" { + t.Error(run.Collector) + } + if run.RunID != "my_agent_run_id" { + t.Error(run) + } +} + +func TestConnectAttemptDisconnectOnRedirect(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, disconnectBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if !IsDisconnect(err) { + t.Fatal(err) + } +} + +func TestConnectAttemptDisconnectOnConnect(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, disconnectBody)}, + }) + if nil != run { + t.Error(run) + } + if !IsDisconnect(err) { + t.Fatal(err) + } +} + +func 
TestConnectAttemptLicenseExceptionOnRedirect(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, licenseBody)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if !IsLicenseException(err) { + t.Fatal(err) + } +} + +func TestConnectAttemptLicenseExceptionOnConnect(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, licenseBody)}, + }) + if nil != run { + t.Error(run) + } + if !IsLicenseException(err) { + t.Fatal(err) + } +} + +func TestConnectAttemptInvalidJSON(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, malformedBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == err { + t.Fatal("missing error") + } +} + +func TestConnectAttemptCollectorNotString(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, `{"return_value":123}`)}, + connect: endpointResult{response: makeResponse(200, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if nil == err { + t.Fatal("missing error") + } +} + +func TestConnectAttempt401(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(401, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if err != ErrUnauthorized { + t.Fatal(err) + } +} + +func TestConnectAttempt413(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(413, 
connectBody)}, + }) + if nil != run { + t.Error(run) + } + if err != ErrPayloadTooLarge { + t.Fatal(err) + } +} + +func TestConnectAttempt415(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(415, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if err != ErrUnsupportedMedia { + t.Fatal(err) + } +} + +func TestConnectAttemptUnexpectedCode(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(404, connectBody)}, + }) + if nil != run { + t.Error(run) + } + if _, ok := err.(unexpectedStatusCodeErr); !ok { + t.Fatal(err) + } +} + +func TestConnectAttemptUnexpectedError(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{err: errors.New("unexpected error")}, + }) + if nil != run { + t.Error(run) + } + if nil == err { + t.Fatal("missing error") + } +} + +func TestConnectAttemptMissingRunID(t *testing.T) { + run, err := testConnectHelper(connectMockRoundTripper{ + redirect: endpointResult{response: makeResponse(200, redirectBody)}, + connect: endpointResult{response: makeResponse(200, `{"return_value":{}}`)}, + }) + if nil != run { + t.Error(run) + } + if nil == err { + t.Fatal("missing error") + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/compress.go b/vendor/github.com/newrelic/go-agent/internal/compress.go new file mode 100644 index 00000000..59fc7ced --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/compress.go @@ -0,0 +1,19 @@ +package internal + +import ( + "bytes" + "compress/zlib" +) + +func compress(b []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + w := zlib.NewWriter(&buf) + _, err := w.Write(b) + w.Close() + + if 
nil != err { + return nil, err + } + + return &buf, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/connect_reply.go b/vendor/github.com/newrelic/go-agent/internal/connect_reply.go new file mode 100644 index 00000000..3eddd7df --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/connect_reply.go @@ -0,0 +1,114 @@ +package internal + +import ( + "strings" + "time" +) + +// AgentRunID identifies the current connection with the collector. +type AgentRunID string + +func (id AgentRunID) String() string { + return string(id) +} + +// AppRun contains information regarding a single connection session with the +// collector. It is created upon application connect and is afterwards +// immutable. +type AppRun struct { + *ConnectReply + Collector string +} + +// ConnectReply contains all of the settings and state send down from the +// collector. It should not be modified after creation. +type ConnectReply struct { + RunID AgentRunID `json:"agent_run_id"` + + // Transaction Name Modifiers + SegmentTerms segmentRules `json:"transaction_segment_terms"` + TxnNameRules metricRules `json:"transaction_name_rules"` + URLRules metricRules `json:"url_rules"` + MetricRules metricRules `json:"metric_name_rules"` + + // Cross Process + EncodingKey string `json:"encoding_key"` + CrossProcessID string `json:"cross_process_id"` + TrustedAccounts []int `json:"trusted_account_ids"` + + // Settings + KeyTxnApdex map[string]float64 `json:"web_transactions_apdex"` + ApdexThresholdSeconds float64 `json:"apdex_t"` + CollectAnalyticsEvents bool `json:"collect_analytics_events"` + CollectCustomEvents bool `json:"collect_custom_events"` + CollectTraces bool `json:"collect_traces"` + CollectErrors bool `json:"collect_errors"` + CollectErrorEvents bool `json:"collect_error_events"` + + // RUM + AgentLoader string `json:"js_agent_loader"` + Beacon string `json:"beacon"` + BrowserKey string `json:"browser_key"` + AppID string `json:"application_id"` + ErrorBeacon string 
`json:"error_beacon"` + JSAgentFile string `json:"js_agent_file"` + + Messages []struct { + Message string `json:"message"` + Level string `json:"level"` + } `json:"messages"` +} + +// ConnectReplyDefaults returns a newly allocated ConnectReply with the proper +// default settings. A pointer to a global is not used to prevent consumers +// from changing the default settings. +func ConnectReplyDefaults() *ConnectReply { + return &ConnectReply{ + ApdexThresholdSeconds: 0.5, + CollectAnalyticsEvents: true, + CollectCustomEvents: true, + CollectTraces: true, + CollectErrors: true, + CollectErrorEvents: true, + } +} + +// CalculateApdexThreshold calculates the apdex threshold. +func CalculateApdexThreshold(c *ConnectReply, txnName string) time.Duration { + if t, ok := c.KeyTxnApdex[txnName]; ok { + return floatSecondsToDuration(t) + } + return floatSecondsToDuration(c.ApdexThresholdSeconds) +} + +// CreateFullTxnName uses collector rules and the appropriate metric prefix to +// construct the full transaction metric name from the name given by the +// consumer. 
+func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string { + var afterURLRules string + if "" != input { + afterURLRules = reply.URLRules.Apply(input) + if "" == afterURLRules { + return "" + } + } + + prefix := backgroundMetricPrefix + if isWeb { + prefix = webMetricPrefix + } + + var beforeNameRules string + if strings.HasPrefix(afterURLRules, "/") { + beforeNameRules = prefix + afterURLRules + } else { + beforeNameRules = prefix + "/" + afterURLRules + } + + afterNameRules := reply.TxnNameRules.Apply(beforeNameRules) + if "" == afterNameRules { + return "" + } + + return reply.SegmentTerms.apply(afterNameRules) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/connect_reply_test.go b/vendor/github.com/newrelic/go-agent/internal/connect_reply_test.go new file mode 100644 index 00000000..7de8a4d0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/connect_reply_test.go @@ -0,0 +1,110 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func TestCreateFullTxnNameBasic(t *testing.T) { + emptyReply := ConnectReplyDefaults() + + tcs := []struct { + input string + background bool + expect string + }{ + {"", true, "WebTransaction/Go/"}, + {"/", true, "WebTransaction/Go/"}, + {"hello", true, "WebTransaction/Go/hello"}, + {"/hello", true, "WebTransaction/Go/hello"}, + + {"", false, "OtherTransaction/Go/"}, + {"/", false, "OtherTransaction/Go/"}, + {"hello", false, "OtherTransaction/Go/hello"}, + {"/hello", false, "OtherTransaction/Go/hello"}, + } + + for _, tc := range tcs { + if out := CreateFullTxnName(tc.input, emptyReply, tc.background); out != tc.expect { + t.Error(tc.input, tc.background, out, tc.expect) + } + } +} + +func TestCreateFullTxnNameURLRulesIgnore(t *testing.T) { + js := `[{ + "match_expression":".*zip.*$", + "ignore":true + }]` + reply := ConnectReplyDefaults() + err := json.Unmarshal([]byte(js), &reply.URLRules) + if nil != err { + t.Fatal(err) + } + if out := 
CreateFullTxnName("/zap/zip/zep", reply, true); out != "" { + t.Error(out) + } +} + +func TestCreateFullTxnNameTxnRulesIgnore(t *testing.T) { + js := `[{ + "match_expression":"^WebTransaction/Go/zap/zip/zep$", + "ignore":true + }]` + reply := ConnectReplyDefaults() + err := json.Unmarshal([]byte(js), &reply.TxnNameRules) + if nil != err { + t.Fatal(err) + } + if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != "" { + t.Error(out) + } +} + +func TestCreateFullTxnNameAllRules(t *testing.T) { + js := `{ + "url_rules":[ + {"match_expression":"zip","each_segment":true,"replacement":"zoop"} + ], + "transaction_name_rules":[ + {"match_expression":"WebTransaction/Go/zap/zoop/zep", + "replacement":"WebTransaction/Go/zap/zoop/zep/zup/zyp"} + ], + "transaction_segment_terms":[ + {"prefix": "WebTransaction/Go/", + "terms": ["zyp", "zoop", "zap"]} + ] + }` + reply := ConnectReplyDefaults() + err := json.Unmarshal([]byte(js), &reply) + if nil != err { + t.Fatal(err) + } + if out := CreateFullTxnName("/zap/zip/zep", reply, true); out != "WebTransaction/Go/zap/zoop/*/zyp" { + t.Error(out) + } +} + +func TestCalculateApdexThreshold(t *testing.T) { + reply := ConnectReplyDefaults() + threshold := CalculateApdexThreshold(reply, "WebTransaction/Go/hello") + if threshold != 500*time.Millisecond { + t.Error("default apdex threshold", threshold) + } + + reply = ConnectReplyDefaults() + reply.ApdexThresholdSeconds = 1.3 + reply.KeyTxnApdex = map[string]float64{ + "WebTransaction/Go/zip": 2.2, + "WebTransaction/Go/zap": 2.3, + } + threshold = CalculateApdexThreshold(reply, "WebTransaction/Go/hello") + if threshold != 1300*time.Millisecond { + t.Error(threshold) + } + threshold = CalculateApdexThreshold(reply, "WebTransaction/Go/zip") + if threshold != 2200*time.Millisecond { + t.Error(threshold) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/README.md new file mode 100644 index 
00000000..85d14db0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/README.md @@ -0,0 +1,3 @@ +# Cross Agent Tests + +At commit ab3dd272f5fd2dd1e0feed351f1fb4b4b646cab2. diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/README.md new file mode 100644 index 00000000..63e441a3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/README.md @@ -0,0 +1,34 @@ +# Cross Agent Tests + +### Data Policy + +None of these tests should contain customer data such as SQL strings. +Please be careful when adding new tests from real world failures. + +### Access + +Push access to this repository is granted via membership in the cross-agent-team GHE group. Contact Belinda Runkle if you are on the agent team but don't have push access. + +### Tests + +| Test Files | Description | +| ------------- |-------------| +| [rum_loader_insertion_location](rum_loader_insertion_location) | Describe where the RUM loader (formerly known as header) should be inserted. | +| [rum_footer_insertion_location](rum_footer_insertion_location) | Describe where the RUM footer (aka "client config") should be inserted. These tests do not apply to agents which insert the footer directly after the loader. | +| [rules.json](rules.json) | Describe how url/metric/txn-name rules should be applied. | +| [rum_client_config.json](rum_client_config.json) | These tests dictate the format and contents of the browser monitoring client configuration. For more information see: [SPEC](https://newrelic.atlassian.net/wiki/display/eng/BAM+Agent+Auto-Instrumentation) | +| [sql_parsing.json](sql_parsing.json) | These tests show how an SQL string should be parsed for the operation and table name. 
| +| [url_clean.json](url_clean.json) | These tests show how URLs should be cleaned before putting them into a trace segment's parameter hash (under the key 'uri'). | +| [url_domain_extraction.json](url_domain_extraction.json) | These tests show how the domain of a URL should be extracted (for the purpose of creating external metrics). | +| [postgres_explain_obfuscation](postgres_explain_obfuscation) | These tests show how plain-text explain plan output from PostgreSQL should be obfuscated when SQL obfuscation is enabled. | +| [sql_obfuscation](sql_obfuscation) | Describe how agents should obfuscate SQL queries before transmission to the collector. | +| [attribute_configuration](attribute_configuration.json) | These tests show how agents should respond to the various attribute configuration settings. For more information see: [Attributes SPEC](https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md) | +| [cat](cat) | These tests cover the new Dirac attributes that are added for the CAT Map project. See the [CAT Spec](https://source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md) and the [README](cat/README.md) for details.| +| [labels](labels.json) | These tests cover the Labels for Language Agents project. See the [Labels for Language Agents Spec](https://newrelic.atlassian.net/wiki/display/eng/Labels+for+Language+Agents) for details.| +| [proc_cpuinfo](proc_cpuinfo) | These test correct processing of `/proc/cpuinfo` output on Linux hosts. | +| [proc_meminfo](proc_meminfo) | These test correct processing of `/proc/meminfo` output on Linux hosts. | +| [transaction_segment_terms.json](transaction_segment_terms.json) | These tests cover agent implementations of the `transaction_segment_terms` transaction renaming rules introduced in collector protocol 14. See [the spec](https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules) for details. 
| +| [synthetics](synthetics) | These tests cover agent support for Synthetics. For details, see [Agent Support for Synthetics: Forced Transaction Traces and Analytic Events](https://source.datanerd.us/agents/agent-specs/blob/master/Synthetics-PORTED.md). | +| [docker_container_id](docker_container_id) | These tests cover parsing of Docker container IDs from `/proc/*/cgroup` on Linux hosts. | +| [aws](aws.json) | These tests cover the collection and validation of AWS metadata for the [utilization draft spec](https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md). | +| [data_transport](data_transport/data_transport.json) | These test correct JSON payload generation and handling collector responses. See [readme](https://source.datanerd.us/agents/cross_agent_tests/blob/master/data_transport/data_transport.md) for details. | diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/cat/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/cat/README.md new file mode 100644 index 00000000..60a88c1d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/cat/README.md @@ -0,0 +1,28 @@ +### CAT Map test details + +The CAT map test cases in `cat_map.json` are meant to be used to verify the +attributes that agents collect and attach to analytics transaction events for +the CAT map project. + +**NOTE** currently `nr.apdexPerfZone` is not covered by these tests, make sure you test for this yourself until it is added to these tests. + +Each test case should correspond to a simulated transaction in the agent under +test. Here's what the various fields in each test case mean: + +| Name | Meaning | +| ---- | ------- | +| `name` | A human-meaningful name for the test case. | +| `appName` | The name of the New Relic application for the simulated transaction. | +| `transactionName` | The final name of the simulated transaction. 
| +| `transactionGuid` | The GUID of the simulated transaction. | +| `inboundPayload` | The (non-serialized) contents of the `X-NewRelic-Transaction` HTTP request header on the simulated transaction. Note that this value should be serialized to JSON, obfuscated using the CAT obfuscation algorithm, and Base64-encoded before being used in the header value. Note also that the `X-NewRelic-ID` header should be set on the simulated transaction, though its value is not specified in these tests. | +| `expectedIntrinsicFields` | A set of key-value pairs that are expected to be present in the analytics event generated for the simulated transaction. These fields should be present in the first hash of the analytic event payload (built-in agent-supplied fields). | +| `nonExpectedIntrinsicFields` | An array of attribute names that should *not* be present in the analytics event generated for the simulated transaction. | +| `outboundRequests` | An array of objects representing outbound requests that should be made in the context of the simulated transaction. See the table below for details. Only present if the test case involves making outgoing requests from the simulated transaction. | + +Here's what the fields of each entry in the `outboundRequests` array mean: + +| Name | Meaning | +| ---- | ------- | +| `outboundTxnName` | The name of the simulated transaction at the time this outbound request is made. Your test driver should set the transaction name to this value prior to simulating the outbound request. | +| `expectedOutboundPayload` | The expected (un-obfuscated) content of the outbound `X-NewRelic-Transaction` request header for this request. 
| diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/data_transport/data_transport.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/data_transport/data_transport.md new file mode 100644 index 00000000..01db1be8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/data_transport/data_transport.md @@ -0,0 +1,35 @@ +What +---- + +The Data Transport Tests are meant to validate your agent's data transport layer (the part of the code that sends requests to the collector and interprets the responses). These tests are meant to be consumable both by code (for automated tests) and by humans, such that we can treat these tests as a sort of spec for agent-collector communication. + +The basic gist is that each of these tests are just collections of steps. A step is one of the following: + + * An event that you need to induce your agent to do (such as generate a metric or do a harvest). + * An expectation that your agent should send a request (as a result of previous steps). + * A composite step, which is just several other steps grouped together to reduce repetition. + +### Types of steps (not an exhaustive list) ### + + * `event_agent_start` -- Represents the startup of the agent. Will contain a `payload` property that defines the startup configuration. + * `event_metric` -- Represents some event that would cause your agent to generate a metric (such as a page view, a database query, etc). + * `event_harvest_metrics` -- Represents some event that would cause your agent to harvest metrics (such as a harvest timer elapsing). + * `event_local_config_update` -- Represents a change to local config while the agent is running. + * `expect_request` -- Represents an expectation that a particular request should happen as a result of the previous events. 
+ * Will sometimes contain a `payload` property that defines the expected serialized payload (if omitted, payload can be ignored). + * Expected payloads will sometimes contain wildcard tokens such as `"__ANY_FLOAT__"`. + * Will sometimes contain a `response_payload` property that defines the response that the test runner should give to the agent (if omitted, just send back any payload that makes your agent continue on happily). + * `expect_no_request` -- Represents an expectation that **no** request should happen as a result of the previous events. *Note that this expectation is redundant if your test runner follows that paradigm that every request that occurs must be explicitly called out by the test data.* + +### "But our agent doesn't do *xyz*!" ### + +It is inevitable that there will be conflicts in functionality between the various agents. As much as possible, these tests are written to be idealistically comprehensive -- that is, covering all behavior that a perfectly functioning agent should follow -- but flexible enough for agents to intentionally ignore components that either don't apply or are not yet supported. + +Examples: + + * **Discrepancy:** Agent does not send `agent_settings` command. + * **Solution:** Ignore `expect_request` steps with `"agent_settings"` as the command name. + * **Discrepancy:** Agent does not yet support custom events. + * **Solution:** Ignore any test that contains a `event_custom_event` step. + * **Discrepancy:** Agent request payloads looks significantly different than the expected payload due to special reasons X, Y, and Z. + * **Solution:** Pre-process all expected payloads to make them match your agent's goofy output. 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/datastores/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/datastores/README.md new file mode 100644 index 00000000..a38115b5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/datastores/README.md @@ -0,0 +1,16 @@ +## Datastore instance tests + +The datastore instance tests provide attributes similar to what an agent could expect to find regarding a database configuration and specifies the expected [datastore instance metric](https://source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md#datastore-metric-namespace) that should be generated. The table below lists types attributes and whether will will always be included or optionally included in each test case. + +| Name | Present | Description | +|---|---|---| +| system_hostname | always | the hostname of the machine | +| db_hostname | sometimes | the hostname reported by the database adapter | +| product | always | the database product for this configuration +| port | sometimes | the port reported by the database adapter | +| unix_socket | sometimes |the path to a unix domain socket reported by a database adapter | +| database_path | sometimes |the path to a filesystem database | +| expected\_instance\_metric | always | the instance metric expected to be generated from the given attributes | + +## Implementing the test cases +The idea behind these test cases are that you are able to determine a set of configuration properties from a database connection, and based on those properties you should generate the `expected_instance_metric`. Sometimes the properties available are minimal and will mean that you will need to fall back to defaults to obtain some of the information. 
When there is missing information from a database adapter the guiding principle is to fill in the defaults when they can be inferred, but do not make guesses that could be incorrect or misleading. Some agents may have access to better data and may not need to make inferences. If this applies to your agent then many of these tests will not be applicable. diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/README.md new file mode 100644 index 00000000..0b69ba17 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/README.md @@ -0,0 +1,6 @@ +These tests cover parsing of Docker container IDs on Linux hosts out of +`/proc/self/cgroup` (or `/proc//cgroup` more generally). + +The `cases.json` file lists each filename in this directory containing +example `/proc/self/cgroup` content, and the expected Docker container ID that +should be parsed from that file. 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-0.9.1.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-0.9.1.txt new file mode 100644 index 00000000..dfc8b806 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-0.9.1.txt @@ -0,0 +1,10 @@ +11:hugetlb:/ +10:perf_event:/ +9:blkio:/ +8:freezer:/ +7:devices:/docker/f37a7e4d17017e7bf774656b19ca4360c6cdc4951c86700a464101d0d9ce97ee +6:memory:/ +5:cpuacct:/ +4:cpu:/docker/f37a7e4d17017e7bf774656b19ca4360c6cdc4951c86700a464101d0d9ce97ee +3:cpuset:/ +2:name=systemd:/ \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.0.0.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.0.0.txt new file mode 100644 index 00000000..0e85d10c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.0.0.txt @@ -0,0 +1,10 @@ +11:hugetlb:/ +10:perf_event:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +9:blkio:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +8:freezer:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +7:devices:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +6:memory:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +5:cpuacct:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +4:cpu:/docker/3ccfa00432798ff38f85839de1e396f771b4acbe9f4ddea0a761c39b9790a782 +3:cpuset:/ +2:name=systemd:/ diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-lxc-driver.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-lxc-driver.txt new file mode 100644 index 00000000..f2c3d3dc --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-lxc-driver.txt @@ -0,0 +1,10 @@ +11:hugetlb:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +10:perf_event:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +9:blkio:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +8:freezer:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +7:name=systemd:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +6:devices:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +5:memory:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +4:cpuacct:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +3:cpu:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 +2:cpuset:/lxc/cb8c113e5f3cf8332f5231f8154adc429ea910f7c29995372de4f571c55d3159 \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-fs.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-fs.txt new file mode 100644 index 00000000..e29f420d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-fs.txt @@ -0,0 +1,10 @@ +11:hugetlb:/ +10:perf_event:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +9:blkio:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +8:freezer:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +7:name=systemd:/ 
+6:devices:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +5:memory:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +4:cpuacct:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +3:cpu:/docker/2a4f870e24a3b52eb9fe7f3e02858c31855e213e568cfa6c76cb046ffa5b8a28 +2:cpuset:/ \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-systemd.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-systemd.txt new file mode 100644 index 00000000..7e7e1ce0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.1.2-native-driver-systemd.txt @@ -0,0 +1,10 @@ +10:hugetlb:/ +9:perf_event:/ +8:blkio:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope +7:net_cls:/ +6:freezer:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope +5:devices:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope +4:memory:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope +3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope +2:cpuset:/ +1:name=systemd:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.3.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.3.txt new file mode 100644 index 00000000..1ccd4434 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/docker-1.3.txt @@ -0,0 +1,9 @@ 
+9:perf_event:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +8:blkio:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +7:net_cls:/ +6:freezer:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +5:devices:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +4:memory:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +3:cpuacct:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +2:cpu:/docker/47cbd16b77c50cbf71401c069cd2189f0e659af17d5a2daca3bddf59d8a870b2 +1:cpuset:/ diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/empty.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/empty.txt new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/heroku.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/heroku.txt new file mode 100644 index 00000000..7a1ae0d7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/heroku.txt @@ -0,0 +1 @@ +1:hugetlb,perf_event,blkio,freezer,devices,memory,cpuacct,cpu,cpuset:/lxc/b6d196c1-50f2-4949-abdb-5d4909864487 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-characters.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-characters.txt new file mode 100644 index 00000000..4ff2f23f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-characters.txt @@ -0,0 +1,9 @@ +9:perf_event:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 
+8:blkio:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +7:net_cls:/ +6:freezer:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +5:devices:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +4:memory:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +3:cpuacct:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +2:cpu:/docker/WRONGINCORRECTINVALIDCHARSERRONEOUSBADPHONYBROKEN2TERRIBLENOPE55 +1:cpuset:/ diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-length.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-length.txt new file mode 100644 index 00000000..8166b21f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/invalid-length.txt @@ -0,0 +1,9 @@ +9:perf_event:/docker/47cbd16b77c5 +8:blkio:/docker/47cbd16b77c5 +7:net_cls:/ +6:freezer:/docker/47cbd16b77c5 +5:devices:/docker/47cbd16b77c5 +4:memory:/docker/47cbd16b77c5 +3:cpuacct:/docker/47cbd16b77c5 +2:cpu:/docker/47cbd16b77c5 +1:cpuset:/ diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-lxc-container.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-lxc-container.txt new file mode 100644 index 00000000..8c5b635a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-lxc-container.txt @@ -0,0 +1,10 @@ +11:hugetlb:/lxc/p1 +10:perf_event:/lxc/p1 +9:blkio:/lxc/p1 +8:freezer:/lxc/p1 +7:devices:/lxc/p1 +6:memory:/lxc/p1 +5:cpuacct:/lxc/p1 +4:cpu:/lxc/p1 +3:cpuset:/lxc/p1 +2:name=systemd:/user/1000.user/1.session \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-no-container.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-no-container.txt new file mode 100644 index 00000000..4439bc55 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.04-no-container.txt @@ -0,0 +1,10 @@ +11:hugetlb:/user/1000.user/2.session +10:perf_event:/user/1000.user/2.session +9:blkio:/user/1000.user/2.session +8:freezer:/user/1000.user/2.session +7:devices:/user/1000.user/2.session +6:memory:/user/1000.user/2.session +5:cpuacct:/user/1000.user/2.session +4:cpu:/user/1000.user/2.session +3:cpuset:/user/1000.user/2.session +2:name=systemd:/user/1000.user/2.session \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.10-no-container.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.10-no-container.txt new file mode 100644 index 00000000..fc987364 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/docker_container_id/ubuntu-14.10-no-container.txt @@ -0,0 +1,10 @@ +10:hugetlb:/ +9:perf_event:/ +8:blkio:/ +7:net_cls,net_prio:/ +6:freezer:/ +5:devices:/ +4:memory:/ +3:cpu,cpuacct:/ +2:cpuset:/ +1:name=systemd:/user.slice/user-1000.slice/session-2.scope \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/README.md new file mode 100644 index 00000000..f839f9e6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/README.md @@ -0,0 +1,16 @@ +# PostgreSQL 
explain plan obfuscation tests + +These tests show how explain plans for PostgreSQL should be obfuscated when +SQL obfuscation is enabled. Obfuscation of explain plans for PostgreSQL is +necessary because they can include portions of the original query that may +contain sensitive data. + +Each test case consists of a set of files with the following extensions: + +* `.query.txt` - the original SQL query that is being explained +* `.explain.txt` - the raw un-obfuscated output from running `EXPLAIN ` +* `.colon_obfuscated.txt` - the desired obfuscated explain output if using the +default, more aggressive obfuscation strategy described [here](https://newrelic.atlassian.net/wiki/display/eng/Obfuscating+PostgreSQL+Explain+plans). +* `.obfuscated.txt` - the desired obfuscated explain output if using a more +accurate, less aggressive obfuscation strategy detailed in this +[Jive thread](https://newrelic.jiveon.com/thread/1851). diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.colon_obfuscated.txt new file mode 100644 index 00000000..7ec5fb0e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.colon_obfuscated.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: ? + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.explain.txt new file mode 100644 index 00000000..17756f23 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.explain.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = 1234) + Filter: ((title)::text = 'sensitive text'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.obfuscated.txt new file mode 100644 index 00000000..0302012b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.obfuscated.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = ?) 
+ Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.query.txt new file mode 100644 index 00000000..98504f2b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/basic_where.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.id=1234 AND blogs.title='sensitive text' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.colon_obfuscated.txt new file mode 100644 index 00000000..39cf4175 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..56.60 rows=1 width=5) + Filter: ? 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.explain.txt new file mode 100644 index 00000000..8005a639 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.explain.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..56.60 rows=1 width=5) + Filter: ((j = 'a'::"char") AND (k = ('now'::cstring)::date)) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.obfuscated.txt new file mode 100644 index 00000000..d83600f8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..56.60 rows=1 width=5) + Filter: ((j = ?::"char") AND (k = (?::cstring)::date)) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.query.txt new file mode 100644 index 00000000..235ec0bb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/current_date.query.txt @@ -0,0 +1 @@ +explain select * from explain_plan_test_4 where j = 'abcd' and k = current_date diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.colon_obfuscated.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.colon_obfuscated.txt new file mode 100644 index 00000000..b3cafd41 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..39.12 rows=12 width=5) + Filter: ? diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.explain.txt new file mode 100644 index 00000000..dac8471e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.explain.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..39.12 rows=12 width=5) + Filter: (k = '2001-09-28'::date) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.obfuscated.txt new file mode 100644 index 00000000..bbf32e47 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_4 (cost=0.00..39.12 rows=12 width=5) + Filter: (k = ?::date) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.query.txt new file mode 100644 index 00000000..8b13eac4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/date.query.txt @@ -0,0 +1 @@ +explain 
select * from explain_plan_test_4 where k = date '2001-09-28'" diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.colon_obfuscated.txt new file mode 100644 index 00000000..45c36b5e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on blogs (cost=0.00..1.01 rows=1 width=540) + Filter: ? diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.explain.txt new file mode 100644 index 00000000..6f154a24 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.explain.txt @@ -0,0 +1,3 @@ +Seq Scan on blogs (cost=0.00..1.01 rows=1 width=540) + Filter: ((title)::text = '\x08\x0C + \r '::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.obfuscated.txt new file mode 100644 index 00000000..bca13233 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on blogs (cost=0.00..1.01 rows=1 width=540) + Filter: ((title)::text = ?::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.query.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.query.txt new file mode 100644 index 00000000..230411c5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_newline.query.txt @@ -0,0 +1 @@ +select * from blogs where title = E'\x08\x0c\n\r\t' diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.colon_obfuscated.txt new file mode 100644 index 00000000..4a95be3e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..24.50 rows=6 width=40) + Filter: ? diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.explain.txt new file mode 100644 index 00000000..8b400e7d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.explain.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..24.50 rows=6 width=40) + Filter: (c = 'three''three'::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.obfuscated.txt new file mode 100644 index 00000000..3b90e9c8 --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..24.50 rows=6 width=40) + Filter: (c = ?::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.query.txt new file mode 100644 index 00000000..c3ddc49e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/embedded_quote.query.txt @@ -0,0 +1 @@ +explain select * from explain_plan_test_1 where c = 'three''three' diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.colon_obfuscated.txt new file mode 100644 index 00000000..c76e41e9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..27.40 rows=6 width=40) + Filter: ? 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.explain.txt new file mode 100644 index 00000000..c9efd548 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.explain.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..27.40 rows=6 width=40) + Filter: ((a)::numeric = 10000000000::numeric) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.obfuscated.txt new file mode 100644 index 00000000..7595d016 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on explain_plan_test_1 (cost=0.00..27.40 rows=6 width=40) + Filter: ((a)::numeric = ?::numeric) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.query.txt new file mode 100644 index 00000000..7f30e0b1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/floating_point.query.txt @@ -0,0 +1 @@ +explain select * from explain_plan_test_1 where a = 1e10 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.colon_obfuscated.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.colon_obfuscated.txt new file mode 100644 index 00000000..b699b074 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.colon_obfuscated.txt @@ -0,0 +1,5 @@ + Hash Join (cost=12.93..26.33 rows=130 width=1113) + Hash Cond: ? + -> Seq Scan on blogs (cost=0.00..11.40 rows=140 width=540) + -> Hash (cost=11.30..11.30 rows=130 width=573) + -> Seq Scan on posts (cost=0.00..11.30 rows=130 width=573) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.explain.txt new file mode 100644 index 00000000..7b7b2fa6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.explain.txt @@ -0,0 +1,5 @@ + Hash Join (cost=12.93..26.33 rows=130 width=1113) + Hash Cond: (pg_catalog.concat(blogs.title, '-suffix') = (posts.title)::text) + -> Seq Scan on blogs (cost=0.00..11.40 rows=140 width=540) + -> Hash (cost=11.30..11.30 rows=130 width=573) + -> Seq Scan on posts (cost=0.00..11.30 rows=130 width=573) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.obfuscated.txt new file mode 100644 index 00000000..ac0e0ee8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.obfuscated.txt @@ -0,0 +1,5 @@ + 
Hash Join (cost=12.93..26.33 rows=130 width=1113) + Hash Cond: (pg_catalog.concat(blogs.title, ?) = (posts.title)::text) + -> Seq Scan on blogs (cost=0.00..11.40 rows=140 width=540) + -> Hash (cost=11.30..11.30 rows=130 width=573) + -> Seq Scan on posts (cost=0.00..11.30 rows=130 width=573) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.query.txt new file mode 100644 index 00000000..be642a22 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/function_with_strings.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs JOIN posts ON posts.title=CONCAT(blogs.title, '-suffix') \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.colon_obfuscated.txt new file mode 100644 index 00000000..7e5d01c8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.colon_obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on "explain_plan_test'_3" (cost=0.00..24.50 rows=6 width=40) + Filter: ? 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.explain.txt new file mode 100644 index 00000000..ffd06faa --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.explain.txt @@ -0,0 +1,2 @@ +Seq Scan on "explain_plan_test'_3" (cost=0.00..24.50 rows=6 width=40) + Filter: (i = '"abcd"'::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.obfuscated.txt new file mode 100644 index 00000000..cdfec9a8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.obfuscated.txt @@ -0,0 +1,2 @@ +Seq Scan on "explain_plan_test'_3" (cost=0.00..24.50 rows=6 width=40) + Filter: (i = ?::text) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.query.txt new file mode 100644 index 00000000..01bfc59d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/quote_in_table_name.query.txt @@ -0,0 +1 @@ +explain select * from "explain_plan_test'_3" where i = '"abcd"' diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.colon_obfuscated.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.colon_obfuscated.txt new file mode 100644 index 00000000..e2ad7da4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.colon_obfuscated.txt @@ -0,0 +1,5 @@ +Insert on explain_plan_test_1 (cost=24.50..49.00 rows=580 width=40) + -> Seq Scan on explain_plan_test_2 (cost=24.50..49.00 rows=580 width=40) + Filter: ? + SubPlan 1 + -> Seq Scan on explain_plan_test_1 (cost=0.00..21.60 rows=1160 width=4) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.explain.txt new file mode 100644 index 00000000..3f6d93db --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.explain.txt @@ -0,0 +1,5 @@ +Insert on explain_plan_test_1 (cost=24.50..49.00 rows=580 width=40) + -> Seq Scan on explain_plan_test_2 (cost=24.50..49.00 rows=580 width=40) + Filter: (NOT (hashed SubPlan 1)) + SubPlan 1 + -> Seq Scan on explain_plan_test_1 (cost=0.00..21.60 rows=1160 width=4) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.obfuscated.txt new file mode 100644 index 00000000..3f6d93db --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.obfuscated.txt @@ -0,0 +1,5 @@ +Insert on explain_plan_test_1 (cost=24.50..49.00 rows=580 width=40) + -> Seq Scan on explain_plan_test_2 (cost=24.50..49.00 rows=580 width=40) + Filter: (NOT (hashed SubPlan 1)) + SubPlan 1 + -> Seq 
Scan on explain_plan_test_1 (cost=0.00..21.60 rows=1160 width=4) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.query.txt new file mode 100644 index 00000000..4c63710b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/subplan.query.txt @@ -0,0 +1 @@ +explain insert into explain_plan_test_1 select * from explain_plan_test_2 where explain_plan_test_2.d not in (select a from explain_plan_test_1) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.colon_obfuscated.txt new file mode 100644 index 00000000..28a296df --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: ? 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.explain.txt new file mode 100644 index 00000000..904186fa --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.explain.txt @@ -0,0 +1,2 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = 1234) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.obfuscated.txt new file mode 100644 index 00000000..bc1714cd --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.obfuscated.txt @@ -0,0 +1,2 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = ?) 
diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.query.txt new file mode 100644 index 00000000..bf9f428c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_integer.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.id=1234 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.explain.txt new file mode 100644 index 00000000..7701fbb3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = '][^|)/('::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.query.txt new file mode 100644 index 00000000..d3216f3e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_regex_chars.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title='][^|)/(' \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.colon_obfuscated.txt new file mode 100644 index 00000000..7ec5fb0e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.colon_obfuscated.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: ? + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.explain.txt new file mode 100644 index 00000000..c6a46a12 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.explain.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = 15402) + Filter: ((title)::text = 'logs'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.obfuscated.txt new file mode 100644 index 00000000..0302012b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.obfuscated.txt @@ -0,0 +1,3 @@ + Index Scan using blogs_pkey on blogs (cost=0.00..8.27 rows=1 width=540) + Index Cond: (id = ?) 
+ Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.query.txt new file mode 100644 index 00000000..32dca5b5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/where_with_substring.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.id=15402 AND blogs.title='logs' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.explain.txt new file mode 100644 index 00000000..79e1da81 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'foo''bar'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.query.txt new file mode 100644 index 00000000..2854fb92 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case1.query.txt @@ -0,0 +1 @@ +EXPLAIN SELECT * FROM blogs WHERE blogs.title=E'foo\'bar' \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.explain.txt new file mode 100644 index 00000000..956a40e8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.explain.txt @@ -0,0 +1,3 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = '\x08\x0C + \r '::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.query.txt new file mode 100644 index 00000000..3dfab9ea --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case2.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=E'\b\f\n\r\t' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.explain.txt new file mode 100644 index 00000000..896ebec6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = '\x01\x079'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.query.txt new file mode 100644 index 00000000..05ca5b98 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case3.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=E'\1\7\9' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.colon_obfuscated.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.explain.txt new file mode 100644 index 00000000..79e1da81 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'foo''bar'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.query.txt new file 
mode 100644 index 00000000..2247258d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case4.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title='foo\'bar' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.explain.txt new file mode 100644 index 00000000..b6b47302 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'U'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.query.txt new file mode 100644 index 00000000..4089e9be --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case5.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=E'\x55' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.explain.txt new file mode 100644 index 00000000..37f3d9e7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'data'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.query.txt new file mode 100644 index 00000000..f8ef48f2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case6.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=E'd\u0061t\U00000061' \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.explain.txt new file mode 100644 index 00000000..37f3d9e7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'data'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.query.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.query.txt new file mode 100644 index 00000000..ca20c583 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case7.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=U&'d\0061t\+000061' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? 
\ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.explain.txt new file mode 100644 index 00000000..3941c5a1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'слон'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.query.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.query.txt new file mode 100644 index 00000000..08c76cb9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case8.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=U&'\0441\043B\043E\043D' \ No newline at end of file diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.colon_obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.colon_obfuscated.txt new file mode 100644 index 00000000..c1159e0d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.colon_obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ? \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.explain.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.explain.txt new file mode 100644 index 00000000..37f3d9e7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.explain.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = 'data'::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.obfuscated.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.obfuscated.txt new file mode 100644 index 00000000..083affa2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.obfuscated.txt @@ -0,0 +1,2 @@ + Seq Scan on blogs (cost=0.00..11.75 rows=1 width=540) + Filter: ((title)::text = ?::text) \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.query.txt 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.query.txt new file mode 100644 index 00000000..69184644 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/postgres_explain_obfuscation/with_escape_case9.query.txt @@ -0,0 +1 @@ +SELECT * FROM blogs WHERE blogs.title=U&'d!0061t!+000061' UESCAPE '!' \ No newline at end of file diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_1logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_1logical.txt new file mode 100644 index 00000000..7476beba --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_1logical.txt @@ -0,0 +1,3 @@ +processor : 0 +model name : AMD Duron(tm) processor +cache size : 64 KB diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_2logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_2logical.txt new file mode 100644 index 00000000..fab5fa56 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_1core_2logical.txt @@ -0,0 +1,14 @@ +processor : 0 +model name : Intel(R) Pentium(R) 4 CPU 2.80GHz +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 1 +processor : 1 +model name : Intel(R) Pentium(R) 4 CPU 2.80GHz +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 1 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_2core_2logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_2core_2logical.txt new file mode 100644 index 00000000..0281fcf5 --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_2core_2logical.txt @@ -0,0 +1,14 @@ +processor : 0 +model name : Intel(R) Pentium(R) D CPU 3.00GHz +cache size : 2048 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 2 +processor : 1 +model name : Intel(R) Pentium(R) D CPU 3.00GHz +cache size : 2048 KB +physical id : 0 +siblings : 2 +core id : 1 +cpu cores : 2 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_4core_4logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_4core_4logical.txt new file mode 100644 index 00000000..0a3bf378 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/1pack_4core_4logical.txt @@ -0,0 +1,28 @@ +processor : 0 +model name : Intel(R) Xeon(R) CPU E5410 @ 2.33GHz +cache size : 6144 KB +physical id : 0 +siblings : 4 +core id : 0 +cpu cores : 4 +processor : 1 +model name : Intel(R) Xeon(R) CPU E5410 @ 2.33GHz +cache size : 6144 KB +physical id : 0 +siblings : 4 +core id : 1 +cpu cores : 4 +processor : 2 +model name : Intel(R) Xeon(R) CPU E5410 @ 2.33GHz +cache size : 6144 KB +physical id : 0 +siblings : 4 +core id : 2 +cpu cores : 4 +processor : 3 +model name : Intel(R) Xeon(R) CPU E5410 @ 2.33GHz +cache size : 6144 KB +physical id : 0 +siblings : 4 +core id : 3 +cpu cores : 4 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_12core_24logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_12core_24logical.txt new file mode 100644 index 00000000..4177c036 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_12core_24logical.txt @@ -0,0 +1,575 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 
2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 0 +cpu cores : 6 +apicid : 32 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.18 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 0 +cpu cores : 6 +apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 1 +cpu cores : 6 +apicid : 34 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits 
virtual +power management: [8] + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 1 +cpu cores : 6 +apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 2 +cpu cores : 6 +apicid : 36 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5319.95 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 2 +cpu cores : 6 +apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 
cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.06 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 8 +cpu cores : 6 +apicid : 48 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 8 +cpu cores : 6 +apicid : 16 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5319.98 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 9 +cpu cores : 6 +apicid : 50 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 
clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 9 +cpu cores : 6 +apicid : 18 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5319.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 10 +cpu cores : 6 +apicid : 52 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 10 +cpu cores : 6 
+apicid : 20 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 0 +cpu cores : 6 +apicid : 33 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5319.94 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 0 +cpu cores : 6 +apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : 
Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 1 +cpu cores : 6 +apicid : 35 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 1 +cpu cores : 6 +apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 16 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 2 +cpu cores : 6 +apicid : 37 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address 
sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 17 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 2 +cpu cores : 6 +apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 18 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 8 +cpu cores : 6 +apicid : 49 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5330.86 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 19 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 8 +cpu cores : 6 +apicid : 17 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat 
pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 20 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 9 +cpu cores : 6 +apicid : 51 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 21 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 9 +cpu cores : 6 +apicid : 19 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.01 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 22 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 1 +siblings : 12 +core id : 10 +cpu cores : 6 +apicid : 53 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae 
mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.05 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] + +processor : 23 +vendor_id : GenuineIntel +cpu family : 6 +model : 44 +model name : Intel(R) Xeon(R) CPU X5650 @ 2.67GHz +stepping : 2 +cpu MHz : 2660.090 +cache size : 12288 KB +physical id : 0 +siblings : 12 +core id : 10 +cpu cores : 6 +apicid : 21 +fpu : yes +fpu_exception : yes +cpuid level : 11 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx pdpe1gb rdtscp lm constant_tsc ida nonstop_tsc arat pni monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr sse4_1 sse4_2 popcnt lahf_lm +bogomips : 5320.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: [8] diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_20core_40logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_20core_40logical.txt new file mode 100644 index 00000000..709087d3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_20core_40logical.txt @@ -0,0 +1,999 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 0 +cpu cores : 10 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx 
pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 1 +cpu cores : 10 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 2 +cpu cores : 10 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc 
aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 3 +cpu cores : 10 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 4 +cpu cores : 10 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm 
pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 8 +cpu cores : 10 +apicid : 16 +initial apicid : 16 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 9 +cpu cores : 10 +apicid : 18 +initial apicid : 18 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand 
lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 10 +cpu cores : 10 +apicid : 20 +initial apicid : 20 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 11 +cpu cores : 10 +apicid : 22 +initial apicid : 22 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid 
fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 12 +cpu cores : 10 +apicid : 24 +initial apicid : 24 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 0 +cpu cores : 10 +apicid : 32 +initial apicid : 32 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 
+address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 1 +cpu cores : 10 +apicid : 34 +initial apicid : 34 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 2 +cpu cores : 10 +apicid : 36 +initial apicid : 36 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + 
+processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 3 +cpu cores : 10 +apicid : 38 +initial apicid : 38 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 4 +cpu cores : 10 +apicid : 40 +initial apicid : 40 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model 
name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 8 +cpu cores : 10 +apicid : 48 +initial apicid : 48 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 16 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 9 +cpu cores : 10 +apicid : 50 +initial apicid : 50 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 17 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 
1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 10 +cpu cores : 10 +apicid : 52 +initial apicid : 52 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 18 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 11 +cpu cores : 10 +apicid : 54 +initial apicid : 54 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 19 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 2801.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 12 
+cpu cores : 10 +apicid : 56 +initial apicid : 56 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 20 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 0 +cpu cores : 10 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 21 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 1 +cpu cores : 10 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes 
+cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 22 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 2 +cpu cores : 10 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 23 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 3 +cpu cores : 10 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep 
mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 24 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 4 +cpu cores : 10 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 25 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 8 +cpu cores : 10 +apicid : 17 +initial apicid : 17 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe 
syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 26 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 9 +cpu cores : 10 +apicid : 19 +initial apicid : 19 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 27 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 10 +cpu cores : 10 +apicid : 21 +initial apicid : 21 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good 
xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 28 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 11 +cpu cores : 10 +apicid : 23 +initial apicid : 23 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 29 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 0 +siblings : 20 +core id : 12 +cpu cores : 10 +apicid : 25 +initial apicid : 25 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx 
est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5586.71 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 30 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 0 +cpu cores : 10 +apicid : 33 +initial apicid : 33 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 31 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 1 +cpu cores : 10 +apicid : 35 +initial apicid : 35 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt 
tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 32 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 2 +cpu cores : 10 +apicid : 37 +initial apicid : 37 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 33 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 3 +cpu cores : 10 +apicid : 39 +initial apicid : 39 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln 
pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 34 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 4 +cpu cores : 10 +apicid : 41 +initial apicid : 41 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 35 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 8 +cpu cores : 10 +apicid : 49 +initial apicid : 49 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 
5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 36 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 9 +cpu cores : 10 +apicid : 51 +initial apicid : 51 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 37 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 10 +cpu cores : 10 +apicid : 53 +initial apicid : 53 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits 
physical, 48 bits virtual +power management: + +processor : 38 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 11 +cpu cores : 10 +apicid : 55 +initial apicid : 55 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 39 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz +stepping : 4 +cpu MHz : 1200.000 +cache size : 25600 KB +physical id : 1 +siblings : 20 +core id : 12 +cpu cores : 10 +apicid : 57 +initial apicid : 57 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dts tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms +bogomips : 5585.83 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_2logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_2logical.txt new file mode 100644 index 00000000..f67aaa7f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_2logical.txt @@ -0,0 +1,51 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 15 +model : 33 +model name : Dual Core AMD Opteron(tm) Processor 270 +stepping : 2 +cpu MHz : 2004.546 +cache size : 1024 KB +physical id : 0 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt lm 3dnowext 3dnow pni lahf_lm cmp_lega +cy +bogomips : 4011.21 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp + +processor : 1 +vendor_id : AuthenticAMD +cpu family : 15 +model : 33 +model name : Dual Core AMD Opteron(tm) Processor 270 +stepping : 2 +cpu MHz : 2004.546 +cache size : 1024 KB +physical id : 1 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt lm 3dnowext 3dnow up pni lahf_lm cmp_l +egacy +bogomips : 4011.21 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_4logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_4logical.txt new file mode 100644 index 00000000..cedcb7aa --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_2core_4logical.txt @@ -0,0 +1,28 @@ +processor : 0 +model name : Intel(R) Xeon(TM) CPU 3.60GHz +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 1 +processor : 1 +model name : Intel(R) Xeon(TM) CPU 3.60GHz +cache size : 1024 KB +physical id : 3 +siblings : 2 +core id : 0 +cpu cores : 1 +processor : 2 +model name : Intel(R) Xeon(TM) CPU 3.60GHz +cache size : 1024 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 1 +processor : 3 +model name : Intel(R) Xeon(TM) CPU 3.60GHz +cache size : 1024 KB +physical id : 3 +siblings : 2 +core id : 0 +cpu cores : 1 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_4core_4logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_4core_4logical.txt new file mode 100644 index 00000000..6e6f418b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/2pack_4core_4logical.txt @@ -0,0 +1,28 @@ +processor : 0 +model name : Intel(R) Xeon(R) CPU 5160 @ 3.00GHz +cache size : 4096 KB +physical id : 0 +siblings : 2 +core id : 0 +cpu cores : 2 +processor : 1 +model name : Intel(R) Xeon(R) CPU 5160 @ 3.00GHz +cache size : 4096 KB +physical id : 0 +siblings : 2 +core id : 1 +cpu cores : 2 +processor : 2 +model name : Intel(R) Xeon(R) CPU 5160 @ 3.00GHz +cache size : 4096 KB +physical id : 3 +siblings : 2 +core id : 0 +cpu cores : 2 +processor : 3 +model name : Intel(R) Xeon(R) CPU 5160 @ 3.00GHz +cache size : 4096 KB +physical id : 3 +siblings : 2 +core id : 1 +cpu cores : 2 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/4pack_4core_4logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/4pack_4core_4logical.txt new file mode 100644 index 00000000..a1f0db10 --- 
/dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/4pack_4core_4logical.txt @@ -0,0 +1,103 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2218 HE +stepping : 3 +cpu MHz : 2599.998 +cache size : 1024 KB +physical id : 0 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow pni cx16 lahf +_lm cmp_legacy svm extapic cr8_legacy +bogomips : 5202.15 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc + +processor : 1 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2218 HE +stepping : 3 +cpu MHz : 2599.998 +cache size : 1024 KB +physical id : 1 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow up pni cx16 l +ahf_lm cmp_legacy svm extapic cr8_legacy +bogomips : 5202.15 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc + +processor : 2 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2218 HE +stepping : 3 +cpu MHz : 2599.998 +cache size : 1024 KB +physical id : 2 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow up pni cx16 l +ahf_lm cmp_legacy svm extapic cr8_legacy 
+bogomips : 5202.15 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc + +processor : 3 +vendor_id : AuthenticAMD +cpu family : 15 +model : 65 +model name : Dual-Core AMD Opteron(tm) Processor 2218 HE +stepping : 3 +cpu MHz : 2599.998 +cache size : 1024 KB +physical id : 3 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 1 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush mmx fx +sr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow up pni cx16 l +ahf_lm cmp_legacy svm extapic cr8_legacy +bogomips : 5202.15 +TLB size : 1024 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 40 bits physical, 48 bits virtual +power management: ts fid vid ttp tm stc diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/8pack_8core_8logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/8pack_8core_8logical.txt new file mode 100644 index 00000000..efcaae2e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/8pack_8core_8logical.txt @@ -0,0 +1,199 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 0 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx +est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : 
Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 1 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 2 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 3 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 4 +siblings : 1 +core 
id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 5 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 6 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +physical id : 7 +siblings : 1 +core id : 0 +cpu cores : 1 +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov 
pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc up pni monitor ds_cpl v +mx est tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 4654.10 +clflush size : 64 +cache_alignment : 64 +address sizes : 38 bits physical, 48 bits virtual +power management: diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/README.md new file mode 100644 index 00000000..04997821 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/README.md @@ -0,0 +1,28 @@ +These tests are for determining the numbers of physical packages, physical cores, +and logical processors from the data returned by /proc/cpuinfo on Linux hosts. +Each text file in this directory is the output of /proc/cpuinfo on various machines. + +The names of all test files should be of the form `Apack_Bcore_Clogical.txt` +where `A`, `B`, and `C` are integers or the character `X`. For example, +a single quad-core processor without hyperthreading would correspond to +`1pack_4core_4logical.txt`, while two 6-core processors with hyperthreading +would correspond to `2pack_12core_24logical.txt`, and would be pretty sweet. + +Using `A`, `B`, and `C` from above, code processing the text in these files +should produce the following expected values: + +| property | value | +| -------------------- |---------| +| # physical packages | `A` | +| # physical cores | `B` | +| # logical processors | `C` | + +(Obviously, the processing code should do this with no knowledge of the filenames.) + +If any of `A`, `B`, or `C` are the character `X` instead of an integer, then +processing code should not return a value (return `null`, return `nil`, +raise an exception... whatever makes most sense for your agent). + +There is a malformed.txt file which is a random file that does not adhere to +any /proc/cpuinfo format. 
The expected result is `null` for packages, cores and +processors. diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/Xpack_Xcore_2logical.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/Xpack_Xcore_2logical.txt new file mode 100644 index 00000000..cc9b4917 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/Xpack_Xcore_2logical.txt @@ -0,0 +1,43 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +fdiv_bug : no +hlt_bug : no +f00f_bug : no +coma_bug : no +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm pbe nx lm constant_tsc pni monitor ds_cpl vmx est +tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 5821.98 +clflush size : 64 + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 15 +model name : Intel(R) Xeon(R) CPU E5345 @ 2.33GHz +stepping : 11 +cpu MHz : 2327.498 +cache size : 4096 KB +fdiv_bug : no +hlt_bug : no +f00f_bug : no +coma_bug : no +fpu : yes +fpu_exception : yes +cpuid level : 10 +wp : yes +flags : fpu tsc msr pae mce cx8 apic mca cmov pat pse36 clflush dts ac +pi mmx fxsr sse sse2 ss ht tm pbe nx lm constant_tsc up pni monitor ds_cpl vmx e +st tm2 ssse3 cx16 xtpr dca lahf_lm +bogomips : 5821.98 +clflush size : 64 diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/malformed_file.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/malformed_file.txt new file mode 100644 index 00000000..5dfa0137 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_cpuinfo/malformed_file.txt @@ -0,0 +1,3 @@ +This is a random text file that 
does NOT adhere to the /proc/cpuinfo format. +xxxYYYZZz + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/README.md new file mode 100644 index 00000000..be81c084 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/README.md @@ -0,0 +1,7 @@ +These tests are for determining the physical memory from the data returned by +/proc/meminfo on Linux hosts. The total physical memory of the linux system is +reported as part of the enviornment values. The key used by the Python agent +is 'Total Physical Memory (MB)'. + +The names of all test files should be of the form `meminfo_nnnnMB.txt`. The +value `nnnn` in the filename is the physical memory of that system in MB. diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/meminfo_4096MB.txt b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/meminfo_4096MB.txt new file mode 100644 index 00000000..f9f10b25 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/proc_meminfo/meminfo_4096MB.txt @@ -0,0 +1,47 @@ +MemTotal: 4194304 kB +MemFree: 931724 kB +Buffers: 146992 kB +Cached: 545044 kB +SwapCached: 0 kB +Active: 551644 kB +Inactive: 454660 kB +Active(anon): 315628 kB +Inactive(anon): 9084 kB +Active(file): 236016 kB +Inactive(file): 445576 kB +Unevictable: 0 kB +Mlocked: 0 kB +HighTotal: 1183624 kB +HighFree: 295288 kB +LowTotal: 877428 kB +LowFree: 636436 kB +SwapTotal: 1046524 kB +SwapFree: 1046524 kB +Dirty: 72 kB +Writeback: 0 kB +AnonPages: 314416 kB +Mapped: 127944 kB +Shmem: 10448 kB +Slab: 75852 kB +SReclaimable: 59144 kB +SUnreclaim: 16708 kB +KernelStack: 2984 kB +PageTables: 7552 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 2077048 kB +Committed_AS: 2433452 kB 
+VmallocTotal: 122880 kB +VmallocUsed: 23288 kB +VmallocChunk: 98348 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 12280 kB +DirectMap2M: 901120 kB + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/close-body-in-comment.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/close-body-in-comment.html new file mode 100644 index 00000000..3252494c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/close-body-in-comment.html @@ -0,0 +1,10 @@ + + + + Comment contains a close body tag + + +

The quick brown fox jumps over the lazy dog.

+ + EXPECTED_RUM_FOOTER_LOCATION + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/dynamic-iframe.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/dynamic-iframe.html new file mode 100644 index 00000000..18b82ff8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_footer_insertion_location/dynamic-iframe.html @@ -0,0 +1,19 @@ + + + + Dynamic iframe Generation + + +

The quick brown fox jumps over the lazy dog.

+ + + EXPECTED_RUM_FOOTER_LOCATION + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/basic.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/basic.html new file mode 100644 index 00000000..4afa155e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/basic.html @@ -0,0 +1,10 @@ + + EXPECTED_RUM_LOADER_LOCATION + im a title + + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/body_with_attributes.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/body_with_attributes.html new file mode 100644 index 00000000..5442cdbe --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/body_with_attributes.html @@ -0,0 +1,3 @@ +EXPECTED_RUM_LOADER_LOCATION + This isn't great HTML but it's what we've got. 
+ diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag.html new file mode 100644 index 00000000..b050317b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag.html @@ -0,0 +1,11 @@ + + + im a title + + + EXPECTED_RUM_LOADER_LOCATION + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_after_x_ua_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_after_x_ua_tag.html new file mode 100644 index 00000000..7cbc188a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_after_x_ua_tag.html @@ -0,0 +1,11 @@ + + + im a title + + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_before_x_ua_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_before_x_ua_tag.html new file mode 100644 index 00000000..a8f5fcd2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_before_x_ua_tag.html @@ -0,0 +1,11 @@ + + + im a title + + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_with_spaces.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_with_spaces.html new file 
mode 100644 index 00000000..64ba08f8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/charset_tag_with_spaces.html @@ -0,0 +1,11 @@ + + + im a title + + + EXPECTED_RUM_LOADER_LOCATION + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments1.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments1.html new file mode 100644 index 00000000..3d905da5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments1.html @@ -0,0 +1,24 @@ + + + + + + + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments2.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments2.html new file mode 100644 index 00000000..33b7e608 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/comments2.html @@ -0,0 +1,24 @@ + + + + + + + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag.html new file mode 100644 index 00000000..3543dab4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag.html @@ -0,0 +1,11 @@ + + + im a title + + + EXPECTED_RUM_LOADER_LOCATION + + im some body text + diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_after_x_ua_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_after_x_ua_tag.html new file mode 100644 index 00000000..1f1f91ee --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_after_x_ua_tag.html @@ -0,0 +1,11 @@ + + + im a title + + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_before_x_ua_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_before_x_ua_tag.html new file mode 100644 index 00000000..ccbed78e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/content_type_charset_tag_before_x_ua_tag.html @@ -0,0 +1,11 @@ + + + im a title + + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/empty_head b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/empty_head new file mode 100644 index 00000000..f2b2afac --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/empty_head @@ -0,0 +1,4 @@ + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes1.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes1.html new file mode 100644 index 
00000000..3b9cc3f8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes1.html @@ -0,0 +1,27 @@ + + + + + + + + + + + + OPT® + + + + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes2.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes2.html new file mode 100644 index 00000000..888393cc --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes2.html @@ -0,0 +1,24 @@ + + + + + + + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes_mismatch.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes_mismatch.html new file mode 100644 index 00000000..a2af88c8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_quotes_mismatch.html @@ -0,0 +1,24 @@ + + + + + + ication" content="H7LxlilrDfbkX34sqq1X1JwoSCX0c6v15HhUYx49YtE" /> + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes1.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes1.html new file mode 100644 index 00000000..539b4a79 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes1.html @@ -0,0 +1,25 @@ + + + + + + + + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git 
a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes_mismatch.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes_mismatch.html new file mode 100644 index 00000000..1dcc6c80 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/gt_in_single_quotes_mismatch.html @@ -0,0 +1,25 @@ + + + + + + + .01' content='9174ACA637FC44E24AD81253FF836544\' /> + + + + + + OPT® + + + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/head_with_attributes.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/head_with_attributes.html new file mode 100644 index 00000000..07f73ada --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/head_with_attributes.html @@ -0,0 +1,10 @@ + + EXPECTED_RUM_LOADER_LOCATION + im a title + + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/incomplete_non_meta_tags.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/incomplete_non_meta_tags.html new file mode 100644 index 00000000..3003f058 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/incomplete_non_meta_tags.html @@ -0,0 +1,10 @@ + + EXPECTED_RUM_LOADER_LOCATION + + + +EXPECTED_RUM_LOADER_LOCATION + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_header.html 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_header.html new file mode 100644 index 00000000..ee8e6ce0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_header.html @@ -0,0 +1,7 @@ + + + EXPECTED_RUM_LOADER_LOCATION + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_html_and_no_header.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_html_and_no_header.html new file mode 100644 index 00000000..8e01bd2c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_html_and_no_header.html @@ -0,0 +1,3 @@ +EXPECTED_RUM_LOADER_LOCATION + This isn't great HTML but it's what we've got. + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_start_header.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_start_header.html new file mode 100644 index 00000000..4525b759 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/no_start_header.html @@ -0,0 +1,9 @@ + + + + + EXPECTED_RUM_LOADER_LOCATION + Cribbed from the Java agent + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script1.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script1.html new file mode 100644 index 00000000..20b2f6b6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script1.html @@ -0,0 +1,19 @@ + + + EXPECTED_RUM_LOADER_LOCATION + + 
Castor + + + + + + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script2.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script2.html new file mode 100644 index 00000000..90b01c5e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/script2.html @@ -0,0 +1,17 @@ + + + EXPECTED_RUM_LOADER_LOCATION + Castor + + + + + + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag.html new file mode 100644 index 00000000..950d9a73 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag.html @@ -0,0 +1,10 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiline.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiline.html new file mode 100644 index 00000000..a801f6ca --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiline.html @@ -0,0 +1,11 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiple_tags.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiple_tags.html new file mode 100755 index 00000000..1440bb5d --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_multiple_tags.html @@ -0,0 +1,12 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_spaces_around_equals.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_spaces_around_equals.html new file mode 100644 index 00000000..dd006e74 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_spaces_around_equals.html @@ -0,0 +1,10 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_others.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_others.html new file mode 100644 index 00000000..57674c5f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_others.html @@ -0,0 +1,11 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + + im some body text + diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_spaces.html b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_spaces.html new file mode 100644 index 00000000..215ee13b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/rum_loader_insertion_location/x_ua_meta_tag_with_spaces.html @@ -0,0 +1,10 @@ + + + im a title + EXPECTED_RUM_LOADER_LOCATION + + + im some body text + diff 
--git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/sql_obfuscation/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/sql_obfuscation/README.md new file mode 100644 index 00000000..5b1ef5e6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/sql_obfuscation/README.md @@ -0,0 +1,36 @@ +These test cases cover obfuscation (more properly, masking) of literal values +from SQL statements captured by agents. SQL statements may be captured and +attached to transaction trace nodes, or to slow SQL traces. + +`sql_obfuscation.json` contains an array of test cases. The inputs for each +test case are in the `sql` property of each object. Each test case also has an +`obfuscated` property which is an array containing at least one valid output. + +Test cases also have a `dialects` property, which is an array of strings which +specify which sql dialects the test should apply to. See "SQL Syntax Documentation" list below. This is relevant because for example, PostgreSQL uses +different identifier and string quoting rules than MySQL (most notably, +double-quoted string literals are not allowed in PostgreSQL, where +double-quotes are instead used around identifiers). + +Test cases may also contain the following properties: + * `malformed`: (boolean) tests who's SQL queries are not valid SQL in any + quoting mode. Some agents may choose to attempt to obfuscate these cases, + and others may instead just replace the query entirely with a placeholder + message. + * `pathological`: (boolean) tests which are designed specifically to break + specific methods of obfuscation, or contain patterns that are known to be + difficult to handle correctly + * `comments`: an array of strings that could be usefult for understanding + the test. 
+ +The following database documentation may be helpful in understanding these test +cases: +* [MySQL String Literals](http://dev.mysql.com/doc/refman/5.5/en/string-literals.html) +* [PostgreSQL String Constants](http://www.postgresql.org/docs/8.2/static/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS) + +SQL Syntax Documentation: +* [MySQL](http://dev.mysql.com/doc/refman/5.5/en/language-structure.html) +* [PostgreSQL](http://www.postgresql.org/docs/8.4/static/sql-syntax.html) +* [Cassandra](http://docs.datastax.com/en/cql/3.1/cql/cql_reference/cql_lexicon_c.html) +* [Oracle](http://docs.oracle.com/cd/B28359_01/appdev.111/b28370/langelems.htm) +* [SQLite](https://www.sqlite.org/lang.html) diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/synthetics/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/synthetics/README.md new file mode 100644 index 00000000..61acbc83 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/synthetics/README.md @@ -0,0 +1,65 @@ +# Synthetics Tests + +The Synthetics tests are designed to verify that the agent handles valid and invalid Synthetics requests. + +Each test should run a simulated web transaction. A Synthetics HTTP request header is added to the incoming request at the beginning of a web transaction. During the course of the web transaction, an external request is made. And, at the completion of the web transaction, both a Transaction Trace and Transaction Event are recorded. + +Each test then verifies that the correct attributes are added to the Transaction Trace and Transaction Event, and the proper request header is added to the external request when required. Or, in the case of an invalid Synthetics request, that the attributes and request header are **not** added. + +## Name + +| Name | Meaning | +| ---- | ------- | +| `name` | A human-meaningful name for the test case. 
| + +## Settings + +The `settings` hash contains a number of key-value pairs that the agent will need to use for configuration for the test. + +| Name | Meaning | +| ---- | ------- | +| `agentEncodingKey`| The encoding key used by the agent for deobfuscation of the Synthetics request header. | +| `syntheticsEncodingKey` | The encoding key used by Synthetics to obfuscate the Synthetics request header. In most tests, `agentEncodingKey` and `syntheticsEncodingKey` are the same. | +| `transactionGuid` | The GUID of the simulated transaction. In a non-simulated transaction, this will be randomly generated. But, for testing purposes, you should assign this value as the GUID, since the tests will check for this value to be set in the `nr.guid` attribute of the Transaction Event. | +| `trustedAccountIds` | A list of accounts ids that the agent trusts. If the Synthetics request contains a non-trusted account id, it is an invalid request.| + +## Inputs + +The input for each test is a Synthetics request header. The test fixture file shows both the de-obfuscated version of the payload, as well as the resulting obfuscated version. + +| Name | Meaning | +| ---- | ------- | +| `inputHeaderPayload` | A decoded form of the contents of the `X-NewRelic-Synthetics` request header. | +| `inputObfuscatedHeader` | An obfuscated form of the `X-NewRelic-Synthetics` request header. If you obfuscate `inputHeaderPayload` using the `syntheticsEncodingKey`, this should be the output. | + +## Outputs + +There are three different outputs that are tested for: Transaction Trace, Transaction Event, and External Request Header. + +### outputTransactionTrace + +The `outputTransactionTrace` hash contains three objects: + +| Name | Meaning | +| ---- | ------- | +| `header` | The last field of the transaction sample array should be set to the Synthetics Resource ID for a Synthetics request, and should be set to `null` if it isn't. 
(The last field in the array is the 10th element in the header array, but is `header[9]` in zero-based array notation, so the key name is `field_9`.) | +| `expectedIntrinsics` | A set of key-value pairs that represent the attributes that should be set in the intrinsics section of the Transaction Trace. **Note**: If the agent has not implemented the Agent Attributes spec, then the agent should save the attributes in the `Custom` section, and the attribute names should have 'nr.' prepended to them. Read the spec for details. For agents in this situation, they will need to adjust the expected output of the tests accordingly. | +| `nonExpectedIntrinsics` | An array of names that represent the attributes that should **not** be set in the intrinsics section of the Transaction Trace.| + +### outputTransactionEvent + +The `outputTransactionEvent` hash contains two objects: + +| Name | Meaning | +| ---- | ------- | +| `expectedAttributes` | A set of key-value pairs that represent the attributes that should be set in the `Intrinsic` hash of the Transaction Event. | +| `nonExpectedAttributes` | An array of names that represent the attributes that should **not** be set in the `Intrinsic` hash of the Transaction Event. | + +### outputExternalRequestHeader + +The `outputExternalRequestHeader` hash contains two objects: + +| Name | Meaning | +| ---- | ------- | +| `expectedHeader` | The outbound header that should be added to external requests (similar to the CAT header), when the original request was made from a valid Synthetics request. | +| `nonExpectedHeader` | The outbound header that should **not** be added to external requests, when the original request was made from a non-Synthetics request. 
| diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/utilization/README.md b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/utilization/README.md new file mode 100644 index 00000000..4817ac93 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/cross_agent_tests/utilization/README.md @@ -0,0 +1,13 @@ +# The Utilization Tests + +The Utilization tests ensure that the appropriate information is being gathered for pricing. It is centered around ensuring the JSON is correct. Each JSON block is a test case, with potentially the following fields: + + - testname: The name of the test + - input_total_ram_mib: The total ram number calculated by the agent. + - input_logical_processors: The number of logical processors calculated by the agent. + - input_hostname: The hostname calculated by the agent. + - input_aws_id: The aws id determined by the agent. + - input_aws_type: The aws type determined by the agent. + - input_aws_zone: The aws zone determined by the agent. + - input_environment_variables: Any environment variables which have been set. + - expected_output_json: The expected JSON output from the agent for the utilization hash. diff --git a/vendor/github.com/newrelic/go-agent/internal/crossagent/crossagent.go b/vendor/github.com/newrelic/go-agent/internal/crossagent/crossagent.go new file mode 100644 index 00000000..0fbc734e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/crossagent/crossagent.go @@ -0,0 +1,54 @@ +package crossagent + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" +) + +var ( + crossAgentDir = func() string { + if s := os.Getenv("NEW_RELIC_CROSS_AGENT_TESTS"); s != "" { + return s + } + _, here, _, _ := runtime.Caller(0) + return filepath.Join(filepath.Dir(here), "cross_agent_tests") + }() +) + +// ReadFile reads a file from the crossagent tests directory given as with +// ioutil.ReadFile. 
+func ReadFile(name string) ([]byte, error) { + return ioutil.ReadFile(filepath.Join(crossAgentDir, name)) +} + +// ReadJSON takes the name of a file and parses it using JSON.Unmarshal into +// the interface given. +func ReadJSON(name string, v interface{}) error { + data, err := ReadFile(name) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// ReadDir reads a directory relative to crossagent tests and returns an array +// of absolute filepaths of the files in that directory. +func ReadDir(name string) ([]string, error) { + dir := filepath.Join(crossAgentDir, name) + + entries, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + var files []string + for _, info := range entries { + if !info.IsDir() { + files = append(files, filepath.Join(dir, info.Name())) + } + } + return files, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_event.go b/vendor/github.com/newrelic/go-agent/internal/custom_event.go new file mode 100644 index 00000000..3bd46d74 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_event.go @@ -0,0 +1,108 @@ +package internal + +import ( + "bytes" + "fmt" + "regexp" + "time" +) + +// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + +var ( + eventTypeRegexRaw = `^[a-zA-Z0-9:_ ]+$` + eventTypeRegex = regexp.MustCompile(eventTypeRegexRaw) + + errEventTypeLength = fmt.Errorf("event type exceeds length limit of %d", + attributeKeyLengthLimit) + // ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent + // if the event type is not valid. + ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw) + errNumAttributes = fmt.Errorf("maximum of %d attributes exceeded", + customEventAttributeLimit) +) + +// CustomEvent is a custom event. 
+type CustomEvent struct { + eventType string + timestamp time.Time + truncatedParams map[string]interface{} +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", e.eventType) + w.floatField("timestamp", timeToFloatSeconds(e.timestamp)) + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + w = jsonFieldsWriter{buf: buf} + for key, val := range e.truncatedParams { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (e *CustomEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +func eventTypeValidate(eventType string) error { + if len(eventType) > attributeKeyLengthLimit { + return errEventTypeLength + } + if !eventTypeRegex.MatchString(eventType) { + return ErrEventTypeRegex + } + return nil +} + +// CreateCustomEvent creates a custom event. +func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) { + if err := eventTypeValidate(eventType); nil != err { + return nil, err + } + + if len(params) > customEventAttributeLimit { + return nil, errNumAttributes + } + + truncatedParams := make(map[string]interface{}) + for key, val := range params { + if err := validAttributeKey(key); nil != err { + return nil, err + } + + val = truncateStringValueIfLongInterface(val) + + if err := valueIsValid(val); nil != err { + return nil, err + } + truncatedParams[key] = val + } + + return &CustomEvent{ + eventType: eventType, + timestamp: now, + truncatedParams: truncatedParams, + }, nil +} + +// MergeIntoHarvest implements Harvestable. 
+func (e *CustomEvent) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.Add(e) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_event_test.go b/vendor/github.com/newrelic/go-agent/internal/custom_event_test.go new file mode 100644 index 00000000..b75b42e9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_event_test.go @@ -0,0 +1,224 @@ +package internal + +import ( + "encoding/json" + "strconv" + "testing" + "time" +) + +var ( + now = time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + strLen512 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + strLen255 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +) + +// Tests use a single key-value pair in params to ensure deterministic JSON +// ordering. 
+ +func TestCreateCustomEventSuccess(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": 1}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":1},{}]` { + t.Fatal(string(js)) + } +} + +func TestInvalidEventTypeCharacter(t *testing.T) { + event, err := CreateCustomEvent("myEvent!", map[string]interface{}{"alpha": 1}, now) + if err != ErrEventTypeRegex { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestLongEventType(t *testing.T) { + event, err := CreateCustomEvent(strLen512, map[string]interface{}{"alpha": 1}, now) + if err != errEventTypeLength { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestNilParams(t *testing.T) { + event, err := CreateCustomEvent("myEvent", nil, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{},{}]` { + t.Fatal(string(js)) + } +} + +func TestMissingEventType(t *testing.T) { + event, err := CreateCustomEvent("", map[string]interface{}{"alpha": 1}, now) + if err != ErrEventTypeRegex { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestEmptyParams(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{},{}]` { + t.Fatal(string(js)) + } +} + +func TestTruncatedStringValue(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": strLen512}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != 
`[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":"`+strLen255+`"},{}]` { + t.Fatal(string(js)) + } +} + +func TestInvalidValueType(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"alpha": []string{}}, now) + if _, ok := err.(ErrInvalidAttribute); !ok { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestInvalidCustomAttributeKey(t *testing.T) { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{strLen512: 1}, now) + if nil == err { + t.Fatal(err) + } + if _, ok := err.(invalidAttributeKeyErr); !ok { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestTooManyAttributes(t *testing.T) { + params := make(map[string]interface{}) + for i := 0; i < customEventAttributeLimit+1; i++ { + params[strconv.Itoa(i)] = i + } + event, err := CreateCustomEvent("myEvent", params, now) + if errNumAttributes != err { + t.Fatal(err) + } + if nil != event { + t.Fatal(event) + } +} + +func TestCustomEventAttributeTypes(t *testing.T) { + testcases := []struct { + val interface{} + js string + }{ + {"string", `"string"`}, + {true, `true`}, + {false, `false`}, + {nil, `null`}, + {uint8(1), `1`}, + {uint16(1), `1`}, + {uint32(1), `1`}, + {uint64(1), `1`}, + {int8(1), `1`}, + {int16(1), `1`}, + {int32(1), `1`}, + {int64(1), `1`}, + {float32(1), `1`}, + {float64(1), `1`}, + {uint(1), `1`}, + {int(1), `1`}, + {uintptr(1), `1`}, + } + + for _, tc := range testcases { + event, err := CreateCustomEvent("myEvent", map[string]interface{}{"key": tc.val}, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"key":`+tc.js+`},{}]` { + t.Fatal(string(js)) + } + } +} + +func TestCustomParamsCopied(t *testing.T) { + params := map[string]interface{}{"alpha": 1} + event, err := CreateCustomEvent("myEvent", params, now) + if nil != err { + t.Fatal(err) + } + // Attempt to change 
the params after the event created: + params["zip"] = "zap" + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + if string(js) != `[{"type":"myEvent","timestamp":1.41713646e+09},{"alpha":1},{}]` { + t.Fatal(string(js)) + } +} + +func TestMultipleAttributeJSON(t *testing.T) { + params := map[string]interface{}{"alpha": 1, "beta": 2} + event, err := CreateCustomEvent("myEvent", params, now) + if nil != err { + t.Fatal(err) + } + js, err := json.Marshal(event) + if nil != err { + t.Fatal(err) + } + // Params order may not be deterministic, so we simply test that the + // JSON created is valid. + var valid interface{} + if err := json.Unmarshal(js, &valid); nil != err { + t.Error(string(js)) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_events.go b/vendor/github.com/newrelic/go-agent/internal/custom_events.go new file mode 100644 index 00000000..44e6b973 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_events.go @@ -0,0 +1,32 @@ +package internal + +import ( + "math/rand" + "time" +) + +type customEvents struct { + events *analyticsEvents +} + +func newCustomEvents(max int) *customEvents { + return &customEvents{ + events: newAnalyticsEvents(max), + } +} + +func (cs *customEvents) Add(e *CustomEvent) { + stamp := eventStamp(rand.Float32()) + cs.events.addEvent(analyticsEvent{stamp, e}) +} + +func (cs *customEvents) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.events.mergeFailed(cs.events) +} + +func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return cs.events.CollectorJSON(agentRunID) +} + +func (cs *customEvents) numSeen() float64 { return cs.events.NumSeen() } +func (cs *customEvents) numSaved() float64 { return cs.events.NumSaved() } diff --git a/vendor/github.com/newrelic/go-agent/internal/environment.go b/vendor/github.com/newrelic/go-agent/internal/environment.go new file mode 100644 index 00000000..f7f27801 --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/environment.go @@ -0,0 +1,61 @@ +package internal + +import ( + "encoding/json" + "reflect" + "runtime" +) + +// Environment describes the application's environment. +type Environment struct { + Compiler string `env:"runtime.Compiler"` + GOARCH string `env:"runtime.GOARCH"` + GOOS string `env:"runtime.GOOS"` + Version string `env:"runtime.Version"` + NumCPU int `env:"runtime.NumCPU"` +} + +var ( + // SampleEnvironment is useful for testing. + SampleEnvironment = Environment{ + Compiler: "comp", + GOARCH: "arch", + GOOS: "goos", + Version: "vers", + NumCPU: 8, + } +) + +// NewEnvironment returns a new Environment. +func NewEnvironment() Environment { + return Environment{ + Compiler: runtime.Compiler, + GOARCH: runtime.GOARCH, + GOOS: runtime.GOOS, + Version: runtime.Version(), + NumCPU: runtime.NumCPU(), + } +} + +// MarshalJSON prepares Environment JSON in the format expected by the collector +// during the connect command. +func (e Environment) MarshalJSON() ([]byte, error) { + var arr [][]interface{} + + val := reflect.ValueOf(e) + numFields := val.NumField() + + arr = make([][]interface{}, numFields) + + for i := 0; i < numFields; i++ { + v := val.Field(i) + t := val.Type().Field(i).Tag.Get("env") + + arr[i] = []interface{}{ + t, + v.Interface(), + } + } + + return json.Marshal(arr) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/environment_test.go b/vendor/github.com/newrelic/go-agent/internal/environment_test.go new file mode 100644 index 00000000..e3f3e15d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/environment_test.go @@ -0,0 +1,42 @@ +package internal + +import ( + "encoding/json" + "runtime" + "testing" +) + +func TestMarshalEnvironment(t *testing.T) { + js, err := json.Marshal(&SampleEnvironment) + if nil != err { + t.Fatal(err) + } + expect := CompactJSONString(`[ + ["runtime.Compiler","comp"], + ["runtime.GOARCH","arch"], + ["runtime.GOOS","goos"], + 
["runtime.Version","vers"], + ["runtime.NumCPU",8]]`) + if string(js) != expect { + t.Fatal(string(js)) + } +} + +func TestEnvironmentFields(t *testing.T) { + env := NewEnvironment() + if env.Compiler != runtime.Compiler { + t.Error(env.Compiler, runtime.Compiler) + } + if env.GOARCH != runtime.GOARCH { + t.Error(env.GOARCH, runtime.GOARCH) + } + if env.GOOS != runtime.GOOS { + t.Error(env.GOOS, runtime.GOOS) + } + if env.Version != runtime.Version() { + t.Error(env.Version, runtime.Version()) + } + if env.NumCPU != runtime.NumCPU() { + t.Error(env.NumCPU, runtime.NumCPU()) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/error_events.go b/vendor/github.com/newrelic/go-agent/internal/error_events.go new file mode 100644 index 00000000..6c88e939 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/error_events.go @@ -0,0 +1,75 @@ +package internal + +import ( + "bytes" + "math/rand" + "time" +) + +// MarshalJSON is used for testing. +func (e *ErrorEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +// WriteJSON prepares JSON in the format expected by the collector. 
+// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md +func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "TransactionError") + w.stringField("error.class", e.Klass) + w.stringField("error.message", e.Msg) + w.floatField("timestamp", timeToFloatSeconds(e.When)) + w.stringField("transactionName", e.FinalName) + w.floatField("duration", e.Duration.Seconds()) + if e.Queuing > 0 { + w.floatField("queueDuration", e.Queuing.Seconds()) + } + if e.externalCallCount > 0 { + w.intField("externalCallCount", int64(e.externalCallCount)) + w.floatField("externalDuration", e.externalDuration.Seconds()) + } + if e.datastoreCallCount > 0 { + // Note that "database" is used for the keys here instead of + // "datastore" for historical reasons. + w.intField("databaseCallCount", int64(e.datastoreCallCount)) + w.floatField("databaseDuration", e.datastoreDuration.Seconds()) + } + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(']') +} + +type errorEvents struct { + events *analyticsEvents +} + +func newErrorEvents(max int) *errorEvents { + return &errorEvents{ + events: newAnalyticsEvents(max), + } +} + +func (events *errorEvents) Add(e *ErrorEvent) { + stamp := eventStamp(rand.Float32()) + events.events.addEvent(analyticsEvent{stamp, e}) +} + +func (events *errorEvents) MergeIntoHarvest(h *Harvest) { + h.ErrorEvents.events.mergeFailed(events.events) +} + +func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.events.CollectorJSON(agentRunID) +} + +func (events *errorEvents) numSeen() float64 { return events.events.NumSeen() } +func (events *errorEvents) numSaved() float64 { return events.events.NumSaved() } diff --git a/vendor/github.com/newrelic/go-agent/internal/error_events_test.go 
b/vendor/github.com/newrelic/go-agent/internal/error_events_test.go new file mode 100644 index 00000000..c47cc3e1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/error_events_test.go @@ -0,0 +1,136 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func testErrorEventJSON(t *testing.T, e *ErrorEvent, expect string) { + js, err := json.Marshal(e) + if nil != err { + t.Error(err) + return + } + expect = CompactJSONString(expect) + if string(js) != expect { + t.Error(string(js), expect) + } +} + +var ( + sampleErrorData = ErrorData{ + Klass: "*errors.errorString", + Msg: "hello", + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + } +) + +func TestErrorEventMarshal(t *testing.T) { + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: nil, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3 + }, + {}, + {} + ]`) + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Queuing: 5 * time.Second, + Attrs: nil, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3, + "queueDuration":5 + }, + {}, + {} + ]`) + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Queuing: 5 * time.Second, + DatastoreExternalTotals: DatastoreExternalTotals{ + externalCallCount: 22, + externalDuration: 1122334 * time.Millisecond, + datastoreCallCount: 33, + datastoreDuration: 5566778 * time.Millisecond, + }, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + 
"error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3, + "queueDuration":5, + "externalCallCount":22, + "externalDuration":1122.334, + "databaseCallCount":33, + "databaseDuration":5566.778 + }, + {}, + {} + ]`) +} + +func TestErrorEventAttributes(t *testing.T) { + aci := sampleAttributeConfigInput + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, hostDisplayName) + cfg := CreateAttributeConfig(aci) + attr := NewAttributes(cfg) + attr.Agent.HostDisplayName = "exclude me" + attr.Agent.RequestMethod = "GET" + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + testErrorEventJSON(t, &ErrorEvent{ + ErrorData: sampleErrorData, + TxnEvent: TxnEvent{ + FinalName: "myName", + Duration: 3 * time.Second, + Attrs: attr, + }, + }, `[ + { + "type":"TransactionError", + "error.class":"*errors.errorString", + "error.message":"hello", + "timestamp":1.41713646e+09, + "transactionName":"myName", + "duration":3 + }, + { + "zip":456 + }, + { + "request.method":"GET" + } + ]`) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/errors.go b/vendor/github.com/newrelic/go-agent/internal/errors.go new file mode 100644 index 00000000..7ea78dd3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/errors.go @@ -0,0 +1,169 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +const ( + // PanicErrorKlass is the error klass used for errors generated by + // recovering panics in txn.End. + PanicErrorKlass = "panic" +) + +func panicValueMsg(v interface{}) string { + switch val := v.(type) { + case error: + return val.Error() + default: + return fmt.Sprintf("%v", v) + } +} + +// TxnErrorFromPanic creates a new TxnError from a panic. 
+func TxnErrorFromPanic(now time.Time, v interface{}) ErrorData { + return ErrorData{ + When: now, + Msg: panicValueMsg(v), + Klass: PanicErrorKlass, + } +} + +// TxnErrorFromResponseCode creates a new TxnError from an http response code. +func TxnErrorFromResponseCode(now time.Time, code int) ErrorData { + return ErrorData{ + When: now, + Msg: http.StatusText(code), + Klass: strconv.Itoa(code), + } +} + +// ErrorData contains the information about a recorded error. +type ErrorData struct { + When time.Time + Stack StackTrace + Msg string + Klass string +} + +// TxnError combines error data with information about a transaction. TxnError is used for +// both error events and traced errors. +type TxnError struct { + ErrorData + TxnEvent +} + +// ErrorEvent and tracedError are separate types so that error events and traced errors can have +// different WriteJSON methods. +type ErrorEvent TxnError + +type tracedError TxnError + +// TxnErrors is a set of errors captured in a Transaction. +type TxnErrors []*ErrorData + +// NewTxnErrors returns a new empty TxnErrors. +func NewTxnErrors(max int) TxnErrors { + return make([]*ErrorData, 0, max) +} + +// Add adds a TxnError. 
+func (errors *TxnErrors) Add(e ErrorData) { + if len(*errors) < cap(*errors) { + *errors = append(*errors, &e) + } +} + +func (h *tracedError) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When)) + buf.WriteByte(',') + jsonx.AppendString(buf, h.FinalName) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Msg) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Klass) + buf.WriteByte(',') + + buf.WriteByte('{') + buf.WriteString(`"agentAttributes"`) + buf.WriteByte(':') + agentAttributesJSON(h.Attrs, buf, destError) + buf.WriteByte(',') + buf.WriteString(`"userAttributes"`) + buf.WriteByte(':') + userAttributesJSON(h.Attrs, buf, destError) + buf.WriteByte(',') + buf.WriteString(`"intrinsics"`) + buf.WriteByte(':') + buf.WriteString("{}") + if nil != h.Stack { + buf.WriteByte(',') + buf.WriteString(`"stack_trace"`) + buf.WriteByte(':') + h.Stack.WriteJSON(buf) + } + if h.CleanURL != "" { + buf.WriteByte(',') + buf.WriteString(`"request_uri"`) + buf.WriteByte(':') + jsonx.AppendString(buf, h.CleanURL) + } + buf.WriteByte('}') + + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (h *tracedError) MarshalJSON() ([]byte, error) { + buf := &bytes.Buffer{} + h.WriteJSON(buf) + return buf.Bytes(), nil +} + +type harvestErrors []*tracedError + +func newHarvestErrors(max int) harvestErrors { + return make([]*tracedError, 0, max) +} + +// MergeTxnErrors merges a transaction's errors into the harvest's errors. 
+func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnEvent TxnEvent) { + for _, e := range errs { + if len(*errors) == cap(*errors) { + return + } + *errors = append(*errors, &tracedError{ + TxnEvent: txnEvent, + ErrorData: *e, + }) + } +} + +func (errors harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(errors) { + return nil, nil + } + estimate := 1024 * len(errors) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range errors { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (errors harvestErrors) MergeIntoHarvest(h *Harvest) {} diff --git a/vendor/github.com/newrelic/go-agent/internal/errors_test.go b/vendor/github.com/newrelic/go-agent/internal/errors_test.go new file mode 100644 index 00000000..21e14936 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/errors_test.go @@ -0,0 +1,197 @@ +package internal + +import ( + "encoding/json" + "errors" + "testing" + "time" +) + +var ( + emptyStackTrace = make([]uintptr, 0) +) + +func TestErrorTraceMarshal(t *testing.T) { + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: emptyStackTrace, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + CleanURL: "my_request_uri", + Attrs: nil, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + expect := CompactJSONString(` + [ + 1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{}, + "stack_trace":[], + "request_uri":"my_request_uri" + } + ]`) + if string(js) != expect { + t.Error(string(js)) + } +} + +func TestErrorTraceAttributes(t *testing.T) { + aci := sampleAttributeConfigInput + 
aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, "zap") + aci.ErrorCollector.Exclude = append(aci.ErrorCollector.Exclude, hostDisplayName) + cfg := CreateAttributeConfig(aci) + attr := NewAttributes(cfg) + attr.Agent.HostDisplayName = "exclude me" + attr.Agent.RequestMethod = "GET" + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + he := &tracedError{ + ErrorData: ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Stack: nil, + Msg: "my_msg", + Klass: "my_class", + }, + TxnEvent: TxnEvent{ + FinalName: "my_txn_name", + CleanURL: "my_request_uri", + Attrs: attr, + }, + } + js, err := json.Marshal(he) + if nil != err { + t.Error(err) + } + expect := CompactJSONString(` + [ + 1.41713646e+12, + "my_txn_name", + "my_msg", + "my_class", + { + "agentAttributes":{"request.method":"GET"}, + "userAttributes":{"zip":456}, + "intrinsics":{}, + "request_uri":"my_request_uri" + } + ]`) + if string(js) != expect { + t.Error(string(js)) + } +} + +func TestErrorsLifecycle(t *testing.T) { + ers := NewTxnErrors(5) + + when := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + ers.Add(TxnErrorFromResponseCode(when, 400)) + ers.Add(TxnErrorFromPanic(when, errors.New("oh no panic"))) + ers.Add(TxnErrorFromPanic(when, 123)) + ers.Add(TxnErrorFromPanic(when, 123)) + + he := newHarvestErrors(3) + MergeTxnErrors(&he, ers, TxnEvent{ + FinalName: "txnName", + CleanURL: "requestURI", + Attrs: nil, + }) + js, err := he.Data("agentRunID", time.Now()) + if nil != err { + t.Error(err) + } + expect := CompactJSONString(` +[ + "agentRunID", + [ + [ + 1.41713646e+12, + "txnName", + "Bad Request", + "400", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{}, + "request_uri":"requestURI" + } + ], + [ + 1.41713646e+12, + "txnName", + "oh no panic", + "panic", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{}, + "request_uri":"requestURI" + } + ], + [ + 1.41713646e+12, + 
"txnName", + "123", + "panic", + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{}, + "request_uri":"requestURI" + } + ] + ] +]`) + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func BenchmarkErrorsJSON(b *testing.B) { + when := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + max := 20 + ers := NewTxnErrors(max) + + for i := 0; i < max; i++ { + ers.Add(ErrorData{ + When: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Msg: "error message", + Klass: "error class", + }) + } + + cfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(cfg) + attr.Agent.RequestMethod = "GET" + AddUserAttribute(attr, "zip", 456, DestAll) + + he := newHarvestErrors(max) + MergeTxnErrors(&he, ers, TxnEvent{ + FinalName: "WebTransaction/Go/hello", + CleanURL: "/url", + Attrs: attr, + }) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + js, err := he.Data("agentRundID", when) + if nil != err || nil == js { + b.Fatal(err, js) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/expect.go b/vendor/github.com/newrelic/go-agent/internal/expect.go new file mode 100644 index 00000000..20073db3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/expect.go @@ -0,0 +1,380 @@ +package internal + +import ( + "encoding/json" + "fmt" + "runtime" +) + +var ( + // Unfortunately, the resolution of time.Now() on Windows is coarse: Two + // sequential calls to time.Now() may return the same value, and tests + // which expect non-zero durations may fail. To avoid adding sleep + // statements or mocking time.Now(), those tests are skipped on Windows. + doDurationTests = runtime.GOOS != `windows` +) + +// Validator is used for testing. 
+type Validator interface { + Error(...interface{}) +} + +func validateStringField(v Validator, fieldName, v1, v2 string) { + if v1 != v2 { + v.Error(fieldName, v1, v2) + } +} + +type addValidatorField struct { + field interface{} + original Validator +} + +func (a addValidatorField) Error(fields ...interface{}) { + fields = append([]interface{}{a.field}, fields...) + a.original.Error(fields...) +} + +// ExtendValidator is used to add more context to a validator. +func ExtendValidator(v Validator, field interface{}) Validator { + return addValidatorField{ + field: field, + original: v, + } +} + +// WantMetric is a metric expectation. If Data is nil, then any data values are +// acceptable. +type WantMetric struct { + Name string + Scope string + Forced interface{} // true, false, or nil + Data []float64 +} + +// WantError is a traced error expectation. +type WantError struct { + TxnName string + Msg string + Klass string + Caller string + URL string + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +func uniquePointer() *struct{} { + s := struct{}{} + return &s +} + +var ( + // MatchAnything is for use when matching attributes. + MatchAnything = uniquePointer() +) + +// WantEvent is a transaction or error event expectation. +type WantEvent struct { + Intrinsics map[string]interface{} + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantTxnTrace is a transaction trace expectation. +type WantTxnTrace struct { + MetricName string + CleanURL string + NumSegments int + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantSlowQuery is a slowQuery expectation. +type WantSlowQuery struct { + Count int32 + MetricName string + Query string + TxnName string + TxnURL string + DatabaseName string + Host string + PortPathOrID string + Params map[string]interface{} +} + +// Expect exposes methods that allow for testing whether the correct data was +// captured. 
+type Expect interface { + ExpectCustomEvents(t Validator, want []WantEvent) + ExpectErrors(t Validator, want []WantError) + ExpectErrorEvents(t Validator, want []WantEvent) + ExpectTxnEvents(t Validator, want []WantEvent) + ExpectMetrics(t Validator, want []WantMetric) + ExpectTxnTraces(t Validator, want []WantTxnTrace) + ExpectSlowQueries(t Validator, want []WantSlowQuery) +} + +func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) { + if v1 != v2 { + t.Error("metric fields do not match", id, v1, v2, fieldName) + } +} + +// ExpectMetrics allows testing of metrics. +func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) { + if len(mt.metrics) != len(expect) { + t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + } + expectedIds := make(map[metricID]struct{}) + for _, e := range expect { + id := metricID{Name: e.Name, Scope: e.Scope} + expectedIds[id] = struct{}{} + m := mt.metrics[id] + if nil == m { + t.Error("unable to find metric", id) + continue + } + + if b, ok := e.Forced.(bool); ok { + if b != (forced == m.forced) { + t.Error("metric forced incorrect", b, m.forced, id) + } + } + + if nil != e.Data { + expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied") + expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated") + expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed") + expectMetricField(t, id, e.Data[3], m.data.min, "min") + expectMetricField(t, id, e.Data[4], m.data.max, "max") + expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares") + } + } + for id := range mt.metrics { + if _, ok := expectedIds[id]; !ok { + t.Error("expected metrics does not contain", id.Name, id.Scope) + } + } +} + +func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) { + // TODO: This params comparison can be made smarter: Alert differences + // based on sub/super set behavior. 
+ if len(exists) != len(expect) { + v.Error("attributes length difference", len(exists), len(expect)) + } + for key, val := range expect { + found, ok := exists[key] + if !ok { + v.Error("expected attribute not found: ", key) + continue + } + if val == MatchAnything { + continue + } + v1 := fmt.Sprint(found) + v2 := fmt.Sprint(val) + if v1 != v2 { + v.Error("value difference", fmt.Sprintf("key=%s", key), v1, v2) + } + } +} + +// ExpectCustomEvents allows testing of custom events. +func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantEvent) { + if len(cs.events.events) != len(expect) { + v.Error("number of custom events does not match", len(cs.events.events), + len(expect)) + return + } + for i, e := range expect { + event, ok := cs.events.events[i].jsonWriter.(*CustomEvent) + if !ok { + v.Error("wrong custom event") + } else { + expectEvent(v, event, e) + } + } +} + +func expectEvent(v Validator, e json.Marshaler, expect WantEvent) { + js, err := e.MarshalJSON() + if nil != err { + v.Error("unable to marshal event", err) + return + } + var event []map[string]interface{} + err = json.Unmarshal(js, &event) + if nil != err { + v.Error("unable to parse event json", err) + return + } + intrinsics := event[0] + userAttributes := event[1] + agentAttributes := event[2] + + if nil != expect.Intrinsics { + expectAttributes(v, intrinsics, expect.Intrinsics) + } + if nil != expect.UserAttributes { + expectAttributes(v, userAttributes, expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, agentAttributes, expect.AgentAttributes) + } +} + +// Second attributes have priority. +func mergeAttributes(a1, a2 map[string]interface{}) map[string]interface{} { + a := make(map[string]interface{}) + for k, v := range a1 { + a[k] = v + } + for k, v := range a2 { + a[k] = v + } + return a +} + +// ExpectErrorEvents allows testing of error events. 
+func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantEvent) { + if len(events.events.events) != len(expect) { + v.Error("number of custom events does not match", + len(events.events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events.events[i].jsonWriter.(*ErrorEvent) + if !ok { + v.Error("wrong error event") + } else { + if nil != e.Intrinsics { + e.Intrinsics = mergeAttributes(map[string]interface{}{ + // The following intrinsics should always be present in + // error events: + "type": "TransactionError", + "timestamp": MatchAnything, + "duration": MatchAnything, + }, e.Intrinsics) + } + expectEvent(v, event, e) + } + } +} + +// ExpectTxnEvents allows testing of txn events. +func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantEvent) { + if len(events.events.events) != len(expect) { + v.Error("number of txn events does not match", + len(events.events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events.events[i].jsonWriter.(*TxnEvent) + if !ok { + v.Error("wrong txn event") + } else { + if nil != e.Intrinsics { + e.Intrinsics = mergeAttributes(map[string]interface{}{ + // The following intrinsics should always be present in + // txn events: + "type": "Transaction", + "timestamp": MatchAnything, + "duration": MatchAnything, + }, e.Intrinsics) + } + expectEvent(v, event, e) + } + } +} + +func expectError(v Validator, err *tracedError, expect WantError) { + caller := topCallerNameBase(err.ErrorData.Stack) + validateStringField(v, "caller", expect.Caller, caller) + validateStringField(v, "txnName", expect.TxnName, err.FinalName) + validateStringField(v, "klass", expect.Klass, err.Klass) + validateStringField(v, "msg", expect.Msg, err.Msg) + validateStringField(v, "URL", expect.URL, err.CleanURL) + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(err.Attrs, destError), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + 
expectAttributes(v, getAgentAttributes(err.Attrs, destError), expect.AgentAttributes) + } +} + +// ExpectErrors allows testing of errors. +func ExpectErrors(v Validator, errors harvestErrors, expect []WantError) { + if len(errors) != len(expect) { + v.Error("number of errors mismatch", len(errors), len(expect)) + return + } + for i, e := range expect { + expectError(v, errors[i], e) + } +} + +func expectTxnTrace(v Validator, trace *HarvestTrace, expect WantTxnTrace) { + if doDurationTests && 0 == trace.Duration { + v.Error("zero trace duration") + } + validateStringField(v, "metric name", expect.MetricName, trace.FinalName) + validateStringField(v, "request url", expect.CleanURL, trace.CleanURL) + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(trace.Attrs, destTxnTrace), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, getAgentAttributes(trace.Attrs, destTxnTrace), expect.AgentAttributes) + } + if expect.NumSegments != len(trace.Trace.nodes) { + v.Error("wrong number of segments", expect.NumSegments, len(trace.Trace.nodes)) + } +} + +// ExpectTxnTraces allows testing of transaction traces. 
+func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) { + if len(want) == 0 { + if nil != traces.trace { + v.Error("trace exists when not expected") + } + } else if len(want) > 1 { + v.Error("too many traces expected") + } else { + if nil == traces.trace { + v.Error("missing expected trace") + } else { + expectTxnTrace(v, traces.trace, want[0]) + } + } +} + +func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) { + if slowQuery.Count != want.Count { + t.Error("wrong Count field", slowQuery.Count, want.Count) + } + validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName) + validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query) + validateStringField(t, "TxnName", slowQuery.TxnName, want.TxnName) + validateStringField(t, "TxnURL", slowQuery.TxnURL, want.TxnURL) + validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName) + validateStringField(t, "Host", slowQuery.Host, want.Host) + validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID) + expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params) +} + +// ExpectSlowQueries allows testing of slow queries. 
+func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) { + if len(want) != len(slowQueries.priorityQueue) { + t.Error("wrong number of slow queries", + "expected", len(want), "got", len(slowQueries.priorityQueue)) + return + } + for _, s := range want { + idx, ok := slowQueries.lookup[s.Query] + if !ok { + t.Error("unable to find slow query", s.Query) + continue + } + expectSlowQuery(t, slowQueries.priorityQueue[idx], s) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/harvest.go b/vendor/github.com/newrelic/go-agent/internal/harvest.go new file mode 100644 index 00000000..8c036cd9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/harvest.go @@ -0,0 +1,137 @@ +package internal + +import ( + "strings" + "sync" + "time" +) + +// Harvestable is something that can be merged into a Harvest. +type Harvestable interface { + MergeIntoHarvest(h *Harvest) +} + +// Harvest contains collected data. +type Harvest struct { + Metrics *metricTable + CustomEvents *customEvents + TxnEvents *txnEvents + ErrorEvents *errorEvents + ErrorTraces harvestErrors + TxnTraces *harvestTraces + SlowSQLs *slowQueries +} + +// Payloads returns a map from expected collector method name to data type. +func (h *Harvest) Payloads() map[string]PayloadCreator { + return map[string]PayloadCreator{ + cmdMetrics: h.Metrics, + cmdCustomEvents: h.CustomEvents, + cmdTxnEvents: h.TxnEvents, + cmdErrorEvents: h.ErrorEvents, + cmdErrorData: h.ErrorTraces, + cmdTxnTraces: h.TxnTraces, + cmdSlowSQLs: h.SlowSQLs, + } +} + +// NewHarvest returns a new Harvest. 
+func NewHarvest(now time.Time) *Harvest { + return &Harvest{ + Metrics: newMetricTable(maxMetrics, now), + CustomEvents: newCustomEvents(maxCustomEvents), + TxnEvents: newTxnEvents(maxTxnEvents), + ErrorEvents: newErrorEvents(maxErrorEvents), + ErrorTraces: newHarvestErrors(maxHarvestErrors), + TxnTraces: newHarvestTraces(), + SlowSQLs: newSlowQueries(maxHarvestSlowSQLs), + } +} + +var ( + trackMutex sync.Mutex + trackMetrics []string +) + +// TrackUsage helps track which integration packages are used. +func TrackUsage(s ...string) { + trackMutex.Lock() + defer trackMutex.Unlock() + + m := "Supportability/" + strings.Join(s, "/") + trackMetrics = append(trackMetrics, m) +} + +func createTrackUsageMetrics(metrics *metricTable) { + trackMutex.Lock() + defer trackMutex.Unlock() + + for _, m := range trackMetrics { + metrics.addSingleCount(m, forced) + } +} + +// CreateFinalMetrics creates extra metrics at harvest time. +func (h *Harvest) CreateFinalMetrics() { + h.Metrics.addSingleCount(instanceReporting, forced) + + h.Metrics.addCount(customEventsSeen, h.CustomEvents.numSeen(), forced) + h.Metrics.addCount(customEventsSent, h.CustomEvents.numSaved(), forced) + + h.Metrics.addCount(txnEventsSeen, h.TxnEvents.numSeen(), forced) + h.Metrics.addCount(txnEventsSent, h.TxnEvents.numSaved(), forced) + + h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.numSeen(), forced) + h.Metrics.addCount(errorEventsSent, h.ErrorEvents.numSaved(), forced) + + if h.Metrics.numDropped > 0 { + h.Metrics.addCount(supportabilityDropped, float64(h.Metrics.numDropped), forced) + } + + createTrackUsageMetrics(h.Metrics) +} + +// PayloadCreator is a data type in the harvest. +type PayloadCreator interface { + // In the event of a rpm request failure (hopefully simply an + // intermittent collector issue) the payload may be merged into the next + // time period's harvest. + Harvestable + // Data prepares JSON in the format expected by the collector endpoint. 
+ // This method should return (nil, nil) if the payload is empty and no + // rpm request is necessary. + Data(agentRunID string, harvestStart time.Time) ([]byte, error) +} + +// CreateTxnMetrics creates metrics for a transaction. +func CreateTxnMetrics(args *TxnData, metrics *metricTable) { + // Duration Metrics + rollup := backgroundRollup + if args.IsWeb { + rollup = webRollup + metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced) + } + + metrics.addDuration(args.FinalName, "", args.Duration, args.Exclusive, forced) + metrics.addDuration(rollup, "", args.Duration, args.Exclusive, forced) + + // Apdex Metrics + if args.Zone != ApdexNone { + metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced) + + mname := apdexPrefix + removeFirstSegment(args.FinalName) + metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced) + } + + // Error Metrics + if args.HasErrors() { + metrics.addSingleCount(errorsRollupMetric.all, forced) + metrics.addSingleCount(errorsRollupMetric.webOrOther(args.IsWeb), forced) + metrics.addSingleCount(errorsPrefix+args.FinalName, forced) + } + + // Queueing Metrics + if args.Queuing > 0 { + metrics.addDuration(queueMetric, "", args.Queuing, args.Queuing, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/harvest_test.go b/vendor/github.com/newrelic/go-agent/internal/harvest_test.go new file mode 100644 index 00000000..e5a4a259 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/harvest_test.go @@ -0,0 +1,270 @@ +package internal + +import ( + "testing" + "time" +) + +func TestCreateFinalMetrics(t *testing.T) { + now := time.Now() + + h := NewHarvest(now) + h.CreateFinalMetrics() + ExpectMetrics(t, h.Metrics, []WantMetric{ + {instanceReporting, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {customEventsSeen, "", true, []float64{0, 0, 0, 0, 0, 0}}, + {customEventsSent, "", true, []float64{0, 0, 0, 0, 0, 0}}, + {txnEventsSeen, "", true, []float64{0, 0, 0, 0, 0, 0}}, + 
{txnEventsSent, "", true, []float64{0, 0, 0, 0, 0, 0}}, + {errorEventsSeen, "", true, []float64{0, 0, 0, 0, 0, 0}}, + {errorEventsSent, "", true, []float64{0, 0, 0, 0, 0, 0}}, + }) + + h = NewHarvest(now) + h.Metrics = newMetricTable(0, now) + h.CustomEvents = newCustomEvents(1) + h.TxnEvents = newTxnEvents(1) + h.ErrorEvents = newErrorEvents(1) + + h.Metrics.addSingleCount("drop me!", unforced) + + customE, err := CreateCustomEvent("my event type", map[string]interface{}{"zip": 1}, time.Now()) + if nil != err { + t.Fatal(err) + } + h.CustomEvents.Add(customE) + h.CustomEvents.Add(customE) + + txnE := &TxnEvent{} + h.TxnEvents.AddTxnEvent(txnE) + h.TxnEvents.AddTxnEvent(txnE) + + h.ErrorEvents.Add(&ErrorEvent{}) + h.ErrorEvents.Add(&ErrorEvent{}) + + h.CreateFinalMetrics() + ExpectMetrics(t, h.Metrics, []WantMetric{ + {instanceReporting, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {customEventsSeen, "", true, []float64{2, 0, 0, 0, 0, 0}}, + {customEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {txnEventsSeen, "", true, []float64{2, 0, 0, 0, 0, 0}}, + {txnEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {errorEventsSeen, "", true, []float64{2, 0, 0, 0, 0, 0}}, + {errorEventsSent, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {supportabilityDropped, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestEmptyPayloads(t *testing.T) { + h := NewHarvest(time.Now()) + payloads := h.Payloads() + for _, p := range payloads { + d, err := p.Data("agentRunID", time.Now()) + if d != nil || err != nil { + t.Error(d, err) + } + } +} + +func TestMergeFailedHarvest(t *testing.T) { + start1 := time.Now() + start2 := start1.Add(1 * time.Minute) + h := NewHarvest(start1) + h.Metrics.addCount("zip", 1, forced) + h.TxnEvents.AddTxnEvent(&TxnEvent{ + FinalName: "finalName", + Start: time.Now(), + Duration: 1 * time.Second, + }) + customEventParams := map[string]interface{}{"zip": 1} + ce, err := CreateCustomEvent("myEvent", customEventParams, time.Now()) + if nil != err { + 
t.Fatal(err) + } + h.CustomEvents.Add(ce) + h.ErrorEvents.Add(&ErrorEvent{ + ErrorData: ErrorData{ + Klass: "klass", + Msg: "msg", + When: time.Now(), + }, + TxnEvent: TxnEvent{ + FinalName: "finalName", + Duration: 1 * time.Second, + }, + }) + + ers := NewTxnErrors(10) + ers.Add(ErrorData{ + When: time.Now(), + Msg: "msg", + Klass: "klass", + Stack: GetStackTrace(0), + }) + MergeTxnErrors(&h.ErrorTraces, ers, TxnEvent{ + FinalName: "finalName", + CleanURL: "requestURI", + Attrs: nil, + }) + + if start1 != h.Metrics.metricPeriodStart { + t.Error(h.Metrics.metricPeriodStart) + } + if 0 != h.Metrics.failedHarvests { + t.Error(h.Metrics.failedHarvests) + } + if 0 != h.CustomEvents.events.failedHarvests { + t.Error(h.CustomEvents.events.failedHarvests) + } + if 0 != h.TxnEvents.events.failedHarvests { + t.Error(h.TxnEvents.events.failedHarvests) + } + if 0 != h.ErrorEvents.events.failedHarvests { + t.Error(h.ErrorEvents.events.failedHarvests) + } + ExpectMetrics(t, h.Metrics, []WantMetric{ + {"zip", "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) + ExpectCustomEvents(t, h.CustomEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "myEvent", + "timestamp": MatchAnything, + }, + UserAttributes: customEventParams, + }}) + ExpectErrorEvents(t, h.ErrorEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "klass", + "error.message": "msg", + "transactionName": "finalName", + }, + }}) + ExpectTxnEvents(t, h.TxnEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "finalName", + }, + }}) + ExpectErrors(t, h.ErrorTraces, []WantError{{ + TxnName: "finalName", + Msg: "msg", + Klass: "klass", + Caller: "internal.TestMergeFailedHarvest", + URL: "requestURI", + }}) + + nextHarvest := NewHarvest(start2) + if start2 != nextHarvest.Metrics.metricPeriodStart { + t.Error(nextHarvest.Metrics.metricPeriodStart) + } + payloads := h.Payloads() + for _, p := range payloads { + p.MergeIntoHarvest(nextHarvest) + } + + if start1 != 
nextHarvest.Metrics.metricPeriodStart { + t.Error(nextHarvest.Metrics.metricPeriodStart) + } + if 1 != nextHarvest.Metrics.failedHarvests { + t.Error(nextHarvest.Metrics.failedHarvests) + } + if 1 != nextHarvest.CustomEvents.events.failedHarvests { + t.Error(nextHarvest.CustomEvents.events.failedHarvests) + } + if 1 != nextHarvest.TxnEvents.events.failedHarvests { + t.Error(nextHarvest.TxnEvents.events.failedHarvests) + } + if 1 != nextHarvest.ErrorEvents.events.failedHarvests { + t.Error(nextHarvest.ErrorEvents.events.failedHarvests) + } + ExpectMetrics(t, nextHarvest.Metrics, []WantMetric{ + {"zip", "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) + ExpectCustomEvents(t, nextHarvest.CustomEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "myEvent", + "timestamp": MatchAnything, + }, + UserAttributes: customEventParams, + }}) + ExpectErrorEvents(t, nextHarvest.ErrorEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "klass", + "error.message": "msg", + "transactionName": "finalName", + }, + }}) + ExpectTxnEvents(t, nextHarvest.TxnEvents, []WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "finalName", + }, + }}) + ExpectErrors(t, nextHarvest.ErrorTraces, []WantError{}) +} + +func TestCreateTxnMetrics(t *testing.T) { + txnErr := &ErrorData{} + txnErrors := []*ErrorData{txnErr} + webName := "WebTransaction/zip/zap" + backgroundName := "OtherTransaction/zip/zap" + args := &TxnData{} + args.Duration = 123 * time.Second + args.Exclusive = 109 * time.Second + args.ApdexThreshold = 2 * time.Second + + args.FinalName = webName + args.IsWeb = true + args.Errors = txnErrors + args.Zone = ApdexTolerating + metrics := newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {webName, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {webRollup, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 
123}}, + {"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/allWeb", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/" + webName, "", true, []float64{1, 0, 0, 0, 0, 0}}, + {apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}}, + {"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}}, + }) + + args.FinalName = webName + args.IsWeb = true + args.Errors = nil + args.Zone = ApdexTolerating + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {webName, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {webRollup, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {dispatcherMetric, "", true, []float64{1, 123, 0, 123, 123, 123 * 123}}, + {apdexRollup, "", true, []float64{0, 1, 0, 2, 2, 0}}, + {"Apdex/zip/zap", "", false, []float64{0, 1, 0, 2, 2, 0}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = txnErrors + args.Zone = ApdexNone + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {"Errors/all", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/allOther", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Errors/" + backgroundName, "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) + + args.FinalName = backgroundName + args.IsWeb = false + args.Errors = nil + args.Zone = ApdexNone + metrics = newMetricTable(100, time.Now()) + CreateTxnMetrics(args, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {backgroundName, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + {backgroundRollup, "", true, []float64{1, 123, 109, 123, 123, 123 * 123}}, + }) + +} diff --git a/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go new file mode 100644 index 
00000000..65f0f948 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go @@ -0,0 +1,52 @@ +package internal + +import ( + "bytes" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type jsonWriter interface { + WriteJSON(buf *bytes.Buffer) +} + +type jsonFieldsWriter struct { + buf *bytes.Buffer + needsComma bool +} + +func (w *jsonFieldsWriter) addKey(key string) { + if w.needsComma { + w.buf.WriteByte(',') + } else { + w.needsComma = true + } + // defensively assume that the key needs escaping: + jsonx.AppendString(w.buf, key) + w.buf.WriteByte(':') +} + +func (w *jsonFieldsWriter) stringField(key string, val string) { + w.addKey(key) + jsonx.AppendString(w.buf, val) +} + +func (w *jsonFieldsWriter) intField(key string, val int64) { + w.addKey(key) + jsonx.AppendInt(w.buf, val) +} + +func (w *jsonFieldsWriter) floatField(key string, val float64) { + w.addKey(key) + jsonx.AppendFloat(w.buf, val) +} + +func (w *jsonFieldsWriter) rawField(key string, val JSONString) { + w.addKey(key) + w.buf.WriteString(string(val)) +} + +func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) { + w.addKey(key) + val.WriteJSON(w.buf) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go new file mode 100644 index 00000000..6495829f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go @@ -0,0 +1,174 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonx extends the encoding/json package to encode JSON +// incrementally and without requiring reflection. +package jsonx + +import ( + "bytes" + "encoding/json" + "math" + "reflect" + "strconv" + "unicode/utf8" +) + +var hex = "0123456789abcdef" + +// AppendString escapes s appends it to buf. 
+func AppendString(buf *bytes.Buffer, s string) { + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + case '\t': + buf.WriteByte('\\') + buf.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as <, > and &. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into JSON and served to some browsers. + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} + +// AppendStringArray appends an array of string literals to buf. 
+func AppendStringArray(buf *bytes.Buffer, a ...string) { + buf.WriteByte('[') + for i, s := range a { + if i > 0 { + buf.WriteByte(',') + } + AppendString(buf, s) + } + buf.WriteByte(']') +} + +// AppendFloat appends a numeric literal representing the value to buf. +func AppendFloat(buf *bytes.Buffer, x float64) error { + var scratch [64]byte + + if math.IsInf(x, 0) || math.IsNaN(x) { + return &json.UnsupportedValueError{ + Value: reflect.ValueOf(x), + Str: strconv.FormatFloat(x, 'g', -1, 64), + } + } + + buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64)) + return nil +} + +// AppendFloatArray appends an array of numeric literals to buf. +func AppendFloatArray(buf *bytes.Buffer, a ...float64) error { + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + if err := AppendFloat(buf, x); err != nil { + return err + } + } + buf.WriteByte(']') + return nil +} + +// AppendInt appends a numeric literal representing the value to buf. +func AppendInt(buf *bytes.Buffer, x int64) { + var scratch [64]byte + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) +} + +// AppendIntArray appends an array of numeric literals to buf. +func AppendIntArray(buf *bytes.Buffer, a ...int64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} + +// AppendUint appends a numeric literal representing the value to buf. +func AppendUint(buf *bytes.Buffer, x uint64) { + var scratch [64]byte + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) +} + +// AppendUintArray appends an array of numeric literals to buf. 
+func AppendUintArray(buf *bytes.Buffer, a ...uint64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} diff --git a/vendor/github.com/newrelic/go-agent/internal/jsonx/encode_test.go b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode_test.go new file mode 100644 index 00000000..2b97c5f0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode_test.go @@ -0,0 +1,179 @@ +package jsonx + +import ( + "bytes" + "math" + "testing" +) + +func TestAppendFloat(t *testing.T) { + buf := &bytes.Buffer{} + + err := AppendFloat(buf, math.NaN()) + if err == nil { + t.Error("AppendFloat(NaN) should return an error") + } + + err = AppendFloat(buf, math.Inf(1)) + if err == nil { + t.Error("AppendFloat(+Inf) should return an error") + } + + err = AppendFloat(buf, math.Inf(-1)) + if err == nil { + t.Error("AppendFloat(-Inf) should return an error") + } +} + +func TestAppendFloats(t *testing.T) { + buf := &bytes.Buffer{} + + AppendFloatArray(buf) + if want, got := "[]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendFloatArray(buf, 3.14) + if want, got := "[3.14]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendFloatArray(buf, 1, 2) + if want, got := "[1,2]", buf.String(); want != got { + t.Errorf("AppendFloatArray(buf)=%q want=%q", got, want) + } +} + +func TestAppendInt(t *testing.T) { + buf := &bytes.Buffer{} + + AppendInt(buf, 42) + if got := buf.String(); got != "42" { + t.Errorf("AppendUint(42) = %#q want %#q", got, "42") + } + + buf.Reset() + AppendInt(buf, -42) + if got := buf.String(); got != "-42" { + t.Errorf("AppendUint(-42) = %#q want %#q", got, "-42") + } +} + +func TestAppendIntArray(t *testing.T) { + buf := &bytes.Buffer{} + + AppendIntArray(buf) + if want, got := "[]", 
buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendIntArray(buf, 42) + if want, got := "[42]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendIntArray(buf, 1, -2) + if want, got := "[1,-2]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendIntArray(buf, 1, -2, 0) + if want, got := "[1,-2,0]", buf.String(); want != got { + t.Errorf("AppendIntArray(buf)=%q want=%q", got, want) + } +} + +func TestAppendUint(t *testing.T) { + buf := &bytes.Buffer{} + + AppendUint(buf, 42) + if got := buf.String(); got != "42" { + t.Errorf("AppendUint(42) = %#q want %#q", got, "42") + } +} + +func TestAppendUintArray(t *testing.T) { + buf := &bytes.Buffer{} + + AppendUintArray(buf) + if want, got := "[]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 42) + if want, got := "[42]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 1, 2) + if want, got := "[1,2]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } + + buf.Reset() + AppendUintArray(buf, 1, 2, 3) + if want, got := "[1,2,3]", buf.String(); want != got { + t.Errorf("AppendUintArray(buf)=%q want=%q", got, want) + } +} + +var encodeStringTests = []struct { + in string + out string +}{ + {"\x00", `"\u0000"`}, + {"\x01", `"\u0001"`}, + {"\x02", `"\u0002"`}, + {"\x03", `"\u0003"`}, + {"\x04", `"\u0004"`}, + {"\x05", `"\u0005"`}, + {"\x06", `"\u0006"`}, + {"\x07", `"\u0007"`}, + {"\x08", `"\u0008"`}, + {"\x09", `"\t"`}, + {"\x0a", `"\n"`}, + {"\x0b", `"\u000b"`}, + {"\x0c", `"\u000c"`}, + {"\x0d", `"\r"`}, + {"\x0e", `"\u000e"`}, + {"\x0f", `"\u000f"`}, + {"\x10", `"\u0010"`}, + {"\x11", `"\u0011"`}, + {"\x12", `"\u0012"`}, + {"\x13", `"\u0013"`}, + 
{"\x14", `"\u0014"`}, + {"\x15", `"\u0015"`}, + {"\x16", `"\u0016"`}, + {"\x17", `"\u0017"`}, + {"\x18", `"\u0018"`}, + {"\x19", `"\u0019"`}, + {"\x1a", `"\u001a"`}, + {"\x1b", `"\u001b"`}, + {"\x1c", `"\u001c"`}, + {"\x1d", `"\u001d"`}, + {"\x1e", `"\u001e"`}, + {"\x1f", `"\u001f"`}, + {"\\", `"\\"`}, + {`"`, `"\""`}, + {"the\u2028quick\t\nbrown\u2029fox", `"the\u2028quick\t\nbrown\u2029fox"`}, +} + +func TestAppendString(t *testing.T) { + buf := &bytes.Buffer{} + + for _, tt := range encodeStringTests { + buf.Reset() + + AppendString(buf, tt.in) + if got := buf.String(); got != tt.out { + t.Errorf("AppendString(%q) = %#q, want %#q", tt.in, got, tt.out) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/labels.go b/vendor/github.com/newrelic/go-agent/internal/labels.go new file mode 100644 index 00000000..b3671c65 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/labels.go @@ -0,0 +1,23 @@ +package internal + +import "encoding/json" + +// Labels is used for connect JSON formatting. +type Labels map[string]string + +// MarshalJSON requires a comment for golint? +func (l Labels) MarshalJSON() ([]byte, error) { + ls := make([]struct { + Key string `json:"label_type"` + Value string `json:"label_value"` + }, len(l)) + + i := 0 + for key, val := range l { + ls[i].Key = key + ls[i].Value = val + i++ + } + + return json.Marshal(ls) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/limits.go b/vendor/github.com/newrelic/go-agent/internal/limits.go new file mode 100644 index 00000000..f6cee956 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/limits.go @@ -0,0 +1,53 @@ +package internal + +import "time" + +const ( + // app behavior + + // ConnectBackoff is the wait time between unsuccessful connect + // attempts. + ConnectBackoff = 20 * time.Second + // HarvestPeriod is the period that collected data is sent to New Relic. 
+ HarvestPeriod = 60 * time.Second + // CollectorTimeout is the timeout used in the client for communication + // with New Relic's servers. + CollectorTimeout = 20 * time.Second + // AppDataChanSize is the size of the channel that contains data sent + // the app processor. + AppDataChanSize = 200 + failedMetricAttemptsLimit = 5 + failedEventsAttemptsLimit = 10 + + // transaction behavior + maxStackTraceFrames = 100 + // MaxTxnErrors is the maximum number of errors captured per + // transaction. + MaxTxnErrors = 5 + maxTxnTraceNodes = 256 + maxTxnSlowQueries = 10 + + // harvest data + maxMetrics = 2 * 1000 + maxCustomEvents = 10 * 1000 + maxTxnEvents = 10 * 1000 + maxErrorEvents = 100 + maxHarvestErrors = 20 + maxHarvestSlowSQLs = 10 + + // attributes + attributeKeyLengthLimit = 255 + attributeValueLengthLimit = 255 + attributeUserLimit = 64 + attributeAgentLimit = 255 - attributeUserLimit + customEventAttributeLimit = 64 + + // Limits affecting Config validation are found in the config package. + + // RuntimeSamplerPeriod is the period of the runtime sampler. Runtime + // metrics should not depend on the sampler period, but the period must + // be the same across instances. For that reason, this value should not + // be changed without notifying customers that they must update all + // instance simultaneously for valid runtime metrics. + RuntimeSamplerPeriod = 60 * time.Second +) diff --git a/vendor/github.com/newrelic/go-agent/internal/logger/logger.go b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go new file mode 100644 index 00000000..a0e39fcb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go @@ -0,0 +1,89 @@ +package logger + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// Logger matches newrelic.Logger to allow implementations to be passed to +// internal packages. 
+type Logger interface { + Error(msg string, context map[string]interface{}) + Warn(msg string, context map[string]interface{}) + Info(msg string, context map[string]interface{}) + Debug(msg string, context map[string]interface{}) + DebugEnabled() bool +} + +// ShimLogger implements Logger and does nothing. +type ShimLogger struct{} + +// Error allows ShimLogger to implement Logger. +func (s ShimLogger) Error(string, map[string]interface{}) {} + +// Warn allows ShimLogger to implement Logger. +func (s ShimLogger) Warn(string, map[string]interface{}) {} + +// Info allows ShimLogger to implement Logger. +func (s ShimLogger) Info(string, map[string]interface{}) {} + +// Debug allows ShimLogger to implement Logger. +func (s ShimLogger) Debug(string, map[string]interface{}) {} + +// DebugEnabled allows ShimLogger to implement Logger. +func (s ShimLogger) DebugEnabled() bool { return false } + +type logFile struct { + l *log.Logger + doDebug bool +} + +// New creates a basic Logger. +func New(w io.Writer, doDebug bool) Logger { + return &logFile{ + l: log.New(w, logPid, logFlags), + doDebug: doDebug, + } +} + +const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds + +var ( + logPid = fmt.Sprintf("(%d) ", os.Getpid()) +) + +func (f *logFile) fire(level, msg string, ctx map[string]interface{}) { + js, err := json.Marshal(struct { + Level string `json:"level"` + Event string `json:"msg"` + Context map[string]interface{} `json:"context"` + }{ + level, + msg, + ctx, + }) + if nil == err { + f.l.Printf(string(js)) + } else { + f.l.Printf("unable to marshal log entry: %v", err) + } +} + +func (f *logFile) Error(msg string, ctx map[string]interface{}) { + f.fire("error", msg, ctx) +} +func (f *logFile) Warn(msg string, ctx map[string]interface{}) { + f.fire("warn", msg, ctx) +} +func (f *logFile) Info(msg string, ctx map[string]interface{}) { + f.fire("info", msg, ctx) +} +func (f *logFile) Debug(msg string, ctx map[string]interface{}) { + if f.doDebug { + f.fire("debug", 
msg, ctx) + } +} +func (f *logFile) DebugEnabled() bool { return f.doDebug } diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_names.go b/vendor/github.com/newrelic/go-agent/internal/metric_names.go new file mode 100644 index 00000000..37b6c8eb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_names.go @@ -0,0 +1,175 @@ +package internal + +const ( + apdexRollup = "Apdex" + apdexPrefix = "Apdex/" + + webRollup = "WebTransaction" + backgroundRollup = "OtherTransaction/all" + + errorsPrefix = "Errors/" + + // "HttpDispatcher" metric is used for the overview graph, and + // therefore should only be made for web transactions. + dispatcherMetric = "HttpDispatcher" + + queueMetric = "WebFrontend/QueueTime" + + webMetricPrefix = "WebTransaction/Go" + backgroundMetricPrefix = "OtherTransaction/Go" + + instanceReporting = "Instance/Reporting" + + // https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + customEventsSeen = "Supportability/Events/Customer/Seen" + customEventsSent = "Supportability/Events/Customer/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md + txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen" + txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md + errorEventsSeen = "Supportability/Events/TransactionError/Seen" + errorEventsSent = "Supportability/Events/TransactionError/Sent" + + supportabilityDropped = "Supportability/MetricsDropped" + + // Runtime/System Metrics + memoryPhysical = "Memory/Physical" + heapObjectsAllocated = "Memory/Heap/AllocatedObjects" + cpuUserUtilization = "CPU/User/Utilization" + cpuSystemUtilization = "CPU/System/Utilization" + cpuUserTime = "CPU/User Time" + cpuSystemTime = "CPU/System Time" + runGoroutine = "Go/Runtime/Goroutines" + gcPauseFraction = "GC/System/Pause Fraction" + gcPauses = "GC/System/Pauses" 
+) + +type rollupMetric struct { + all string + allWeb string + allOther string +} + +func newRollupMetric(s string) rollupMetric { + return rollupMetric{ + all: s + "all", + allWeb: s + "allWeb", + allOther: s + "allOther", + } +} + +func (r rollupMetric) webOrOther(isWeb bool) string { + if isWeb { + return r.allWeb + } + return r.allOther +} + +var ( + errorsRollupMetric = newRollupMetric("Errors/") + + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md + // source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md + externalRollupMetric = newRollupMetric("External/") + + // source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md + datastoreRollupMetric = newRollupMetric("Datastore/") + + datastoreProductMetricsCache = map[string]rollupMetric{ + "Cassandra": newRollupMetric("Datastore/Cassandra/"), + "Derby": newRollupMetric("Datastore/Derby/"), + "Elasticsearch": newRollupMetric("Datastore/Elasticsearch/"), + "Firebird": newRollupMetric("Datastore/Firebird/"), + "IBMDB2": newRollupMetric("Datastore/IBMDB2/"), + "Informix": newRollupMetric("Datastore/Informix/"), + "Memcached": newRollupMetric("Datastore/Memcached/"), + "MongoDB": newRollupMetric("Datastore/MongoDB/"), + "MySQL": newRollupMetric("Datastore/MySQL/"), + "MSSQL": newRollupMetric("Datastore/MSSQL/"), + "Oracle": newRollupMetric("Datastore/Oracle/"), + "Postgres": newRollupMetric("Datastore/Postgres/"), + "Redis": newRollupMetric("Datastore/Redis/"), + "Solr": newRollupMetric("Datastore/Solr/"), + "SQLite": newRollupMetric("Datastore/SQLite/"), + "CouchDB": newRollupMetric("Datastore/CouchDB/"), + "Riak": newRollupMetric("Datastore/Riak/"), + "VoltDB": newRollupMetric("Datastore/VoltDB/"), + } +) + +func customSegmentMetric(s string) string { + return "Custom/" + s +} + +// DatastoreMetricKey contains the fields by which datastore metrics are +// 
aggregated. +type DatastoreMetricKey struct { + Product string + Collection string + Operation string + Host string + PortPathOrID string +} + +type externalMetricKey struct { + Host string + ExternalCrossProcessID string + ExternalTransactionName string +} + +func datastoreScopedMetric(key DatastoreMetricKey) string { + if "" != key.Collection { + return datastoreStatementMetric(key) + } + return datastoreOperationMetric(key) +} + +// Datastore/{datastore}/* +func datastoreProductMetric(key DatastoreMetricKey) rollupMetric { + d, ok := datastoreProductMetricsCache[key.Product] + if ok { + return d + } + return newRollupMetric("Datastore/" + key.Product + "/") +} + +// Datastore/operation/{datastore}/{operation} +func datastoreOperationMetric(key DatastoreMetricKey) string { + return "Datastore/operation/" + key.Product + + "/" + key.Operation +} + +// Datastore/statement/{datastore}/{table}/{operation} +func datastoreStatementMetric(key DatastoreMetricKey) string { + return "Datastore/statement/" + key.Product + + "/" + key.Collection + + "/" + key.Operation +} + +// Datastore/instance/{datastore}/{host}/{port_path_or_id} +func datastoreInstanceMetric(key DatastoreMetricKey) string { + return "Datastore/instance/" + key.Product + + "/" + key.Host + + "/" + key.PortPathOrID +} + +// External/{host}/all +func externalHostMetric(key externalMetricKey) string { + return "External/" + key.Host + "/all" +} + +// ExternalApp/{host}/{external_id}/all +func externalAppMetric(key externalMetricKey) string { + return "ExternalApp/" + key.Host + + "/" + key.ExternalCrossProcessID + "/all" +} + +// ExternalTransaction/{host}/{external_id}/{external_txnname} +func externalTransactionMetric(key externalMetricKey) string { + return "ExternalTransaction/" + key.Host + + "/" + key.ExternalCrossProcessID + + "/" + key.ExternalTransactionName +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_rules.go b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go new 
file mode 100644 index 00000000..a57f3b5e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go @@ -0,0 +1,164 @@ +package internal + +import ( + "encoding/json" + "regexp" + "sort" + "strings" +) + +type ruleResult int + +const ( + ruleMatched ruleResult = iota + ruleUnmatched + ruleIgnore +) + +type metricRule struct { + // 'Ignore' indicates if the entire transaction should be discarded if + // there is a match. This field is only used by "url_rules" and + // "transaction_name_rules", not "metric_name_rules". + Ignore bool `json:"ignore"` + EachSegment bool `json:"each_segment"` + ReplaceAll bool `json:"replace_all"` + Terminate bool `json:"terminate_chain"` + Order int `json:"eval_order"` + OriginalReplacement string `json:"replacement"` + RawExpr string `json:"match_expression"` + + // Go's regexp backreferences use '${1}' instead of the Perlish '\1', so + // we transform the replacement string into the Go syntax and store it + // here. + TransformedReplacement string + re *regexp.Regexp +} + +type metricRules []*metricRule + +// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must +// transform the replacement string. This is non-trivial: `\1` is a +// backreference but `\\1` is not. Rather than count the number of back slashes +// preceding the digit, we simply skip rules with tricky replacements. 
+var ( + transformReplacementAmbiguous = regexp.MustCompile(`\\\\([0-9]+)`) + transformReplacementRegex = regexp.MustCompile(`\\([0-9]+)`) + transformReplacementReplacement = "$${${1}}" +) + +func (rules *metricRules) UnmarshalJSON(data []byte) (err error) { + var raw []*metricRule + + if err := json.Unmarshal(data, &raw); nil != err { + return err + } + + valid := make(metricRules, 0, len(raw)) + + for _, r := range raw { + re, err := regexp.Compile("(?i)" + r.RawExpr) + if err != nil { + // TODO + // Warn("unable to compile rule", { + // "match_expression": r.RawExpr, + // "error": err.Error(), + // }) + continue + } + + if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) { + // TODO + // Warn("unable to transform replacement", { + // "match_expression": r.RawExpr, + // "replacement": r.OriginalReplacement, + // }) + continue + } + + r.re = re + r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement, + transformReplacementReplacement) + valid = append(valid, r) + } + + sort.Sort(valid) + + *rules = valid + return nil +} + +func (rules metricRules) Len() int { + return len(rules) +} + +// Rules should be applied in increasing order +func (rules metricRules) Less(i, j int) bool { + return rules[i].Order < rules[j].Order +} +func (rules metricRules) Swap(i, j int) { + rules[i], rules[j] = rules[j], rules[i] +} + +func replaceFirst(re *regexp.Regexp, s string, replacement string) (ruleResult, string) { + // Note that ReplaceAllStringFunc cannot be used here since it does + // not replace $1 placeholders. + loc := re.FindStringIndex(s) + if nil == loc { + return ruleUnmatched, s + } + firstMatch := s[loc[0]:loc[1]] + firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement) + return ruleMatched, s[0:loc[0]] + firstMatchReplaced + s[loc[1]:] +} + +func (r *metricRule) apply(s string) (ruleResult, string) { + // Rules are strange, and there is no spec. 
+ // This code attempts to duplicate the logic of the PHP agent. + // Ambiguity abounds. + + if r.Ignore { + if r.re.MatchString(s) { + return ruleIgnore, "" + } + return ruleUnmatched, s + } + + if r.ReplaceAll { + if r.re.MatchString(s) { + return ruleMatched, r.re.ReplaceAllString(s, r.TransformedReplacement) + } + return ruleUnmatched, s + } else if r.EachSegment { + segments := strings.Split(string(s), "/") + applied := make([]string, len(segments)) + result := ruleUnmatched + for i, segment := range segments { + var segmentMatched ruleResult + segmentMatched, applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement) + if segmentMatched == ruleMatched { + result = ruleMatched + } + } + return result, strings.Join(applied, "/") + } else { + return replaceFirst(r.re, s, r.TransformedReplacement) + } +} + +func (rules metricRules) Apply(input string) string { + var res ruleResult + s := input + + for _, rule := range rules { + res, s = rule.apply(s) + + if ruleIgnore == res { + return "" + } + if (ruleMatched == res) && rule.Terminate { + break + } + } + + return s +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_rules_test.go b/vendor/github.com/newrelic/go-agent/internal/metric_rules_test.go new file mode 100644 index 00000000..fb13adee --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_rules_test.go @@ -0,0 +1,96 @@ +package internal + +import ( + "encoding/json" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestMetricRules(t *testing.T) { + var tcs []struct { + Testname string `json:"testname"` + Rules metricRules `json:"rules"` + Tests []struct { + Input string `json:"input"` + Expected string `json:"expected"` + } `json:"tests"` + } + + err := crossagent.ReadJSON("rules.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + // This test relies upon Perl-specific regex syntax (negative + // lookahead assertions) which are not implemented in Go's + // 
regexp package. We believe these types of rules are + // exceedingly rare in practice, so we're skipping + // implementation of this exotic syntax for now. + if tc.Testname == "saxon's test" { + continue + } + + for _, x := range tc.Tests { + out := tc.Rules.Apply(x.Input) + if out != x.Expected { + t.Fatal(tc.Testname, x.Input, out, x.Expected) + } + } + } +} + +func TestMetricRuleWithNegativeLookaheadAssertion(t *testing.T) { + js := `[{ + "match_expression":"^(?!account|application).*", + "replacement":"*", + "ignore":false, + "eval_order":0, + "each_segment":true + }]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + if 0 != rules.Len() { + t.Fatal(rules) + } +} + +func TestNilApplyRules(t *testing.T) { + var rules metricRules + + input := "hello" + out := rules.Apply(input) + if input != out { + t.Fatal(input, out) + } +} + +func TestAmbiguousReplacement(t *testing.T) { + js := `[{ + "match_expression":"(.*)/[^/]*.(bmp|css|gif|ico|jpg|jpeg|js|png)", + "replacement":"\\\\1/*.\\2", + "ignore":false, + "eval_order":0 + }]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + if 0 != rules.Len() { + t.Fatal(rules) + } +} + +func TestBadMetricRulesJSON(t *testing.T) { + js := `{}` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil == err { + t.Fatal("missing bad json error") + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metrics.go b/vendor/github.com/newrelic/go-agent/internal/metrics.go new file mode 100644 index 00000000..6cf6e858 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metrics.go @@ -0,0 +1,258 @@ +package internal + +import ( + "bytes" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type metricForce int + +const ( + forced metricForce = iota + unforced +) + +type metricID struct { + Name string `json:"name"` + Scope string `json:"scope,omitempty"` +} + +type 
metricData struct { + // These values are in the units expected by the collector. + countSatisfied float64 // Seconds, or count for Apdex + totalTolerated float64 // Seconds, or count for Apdex + exclusiveFailed float64 // Seconds, or count for Apdex + min float64 // Seconds + max float64 // Seconds + sumSquares float64 // Seconds**2, or 0 for Apdex +} + +func metricDataFromDuration(duration, exclusive time.Duration) metricData { + ds := duration.Seconds() + return metricData{ + countSatisfied: 1, + totalTolerated: ds, + exclusiveFailed: exclusive.Seconds(), + min: ds, + max: ds, + sumSquares: ds * ds, + } +} + +type metric struct { + forced metricForce + data metricData +} + +type metricTable struct { + metricPeriodStart time.Time + failedHarvests int + maxTableSize int // After this max is reached, only forced metrics are added + numDropped int // Number of unforced metrics dropped due to full table + metrics map[metricID]*metric +} + +func newMetricTable(maxTableSize int, now time.Time) *metricTable { + return &metricTable{ + metricPeriodStart: now, + metrics: make(map[metricID]*metric), + maxTableSize: maxTableSize, + failedHarvests: 0, + } +} + +func (mt *metricTable) full() bool { + return len(mt.metrics) >= mt.maxTableSize +} + +func (data *metricData) aggregate(src metricData) { + data.countSatisfied += src.countSatisfied + data.totalTolerated += src.totalTolerated + data.exclusiveFailed += src.exclusiveFailed + + if src.min < data.min { + data.min = src.min + } + if src.max > data.max { + data.max = src.max + } + + data.sumSquares += src.sumSquares +} + +func (mt *metricTable) mergeMetric(id metricID, m metric) { + if to := mt.metrics[id]; nil != to { + to.data.aggregate(m.data) + return + } + + if mt.full() && (unforced == m.forced) { + mt.numDropped++ + return + } + // NOTE: `new` is used in place of `&m` since the latter will make `m` + // get heap allocated regardless of whether or not this line gets + // reached (running go version go1.5 
darwin/amd64). See + // BenchmarkAddingSameMetrics. + alloc := new(metric) + *alloc = m + mt.metrics[id] = alloc +} + +func (mt *metricTable) mergeFailed(from *metricTable) { + fails := from.failedHarvests + 1 + if fails >= failedMetricAttemptsLimit { + return + } + if from.metricPeriodStart.Before(mt.metricPeriodStart) { + mt.metricPeriodStart = from.metricPeriodStart + } + mt.failedHarvests = fails + mt.merge(from, "") +} + +func (mt *metricTable) merge(from *metricTable, newScope string) { + if "" == newScope { + for id, m := range from.metrics { + mt.mergeMetric(id, *m) + } + } else { + for id, m := range from.metrics { + mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m) + } + } +} + +func (mt *metricTable) add(name, scope string, data metricData, force metricForce) { + mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force}) +} + +func (mt *metricTable) addCount(name string, count float64, force metricForce) { + mt.add(name, "", metricData{countSatisfied: count}, force) +} + +func (mt *metricTable) addSingleCount(name string, force metricForce) { + mt.addCount(name, float64(1), force) +} + +func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) { + mt.add(name, scope, metricDataFromDuration(duration, exclusive), force) +} + +func (mt *metricTable) addValueExclusive(name, scope string, total, exclusive float64, force metricForce) { + data := metricData{ + countSatisfied: 1, + totalTolerated: total, + exclusiveFailed: exclusive, + min: total, + max: total, + sumSquares: total * total, + } + mt.add(name, scope, data, force) +} + +func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) { + mt.addValueExclusive(name, scope, total, total, force) +} + +func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) { + apdexSeconds := apdexThreshold.Seconds() + data := metricData{min: 
apdexSeconds, max: apdexSeconds} + + switch zone { + case ApdexSatisfying: + data.countSatisfied = 1 + case ApdexTolerating: + data.totalTolerated = 1 + case ApdexFailing: + data.exclusiveFailed = 1 + } + + mt.add(name, scope, data, force) +} + +func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) { + if 0 == len(mt.metrics) { + return nil, nil + } + estimatedBytesPerMetric := 128 + estimatedLen := len(mt.metrics) * estimatedBytesPerMetric + buf := bytes.NewBuffer(make([]byte, 0, estimatedLen)) + buf.WriteByte('[') + + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + jsonx.AppendInt(buf, mt.metricPeriodStart.Unix()) + buf.WriteByte(',') + jsonx.AppendInt(buf, now.Unix()) + buf.WriteByte(',') + + buf.WriteByte('[') + first := true + for id, metric := range mt.metrics { + if first { + first = false + } else { + buf.WriteByte(',') + } + buf.WriteByte('[') + buf.WriteByte('{') + buf.WriteString(`"name":`) + jsonx.AppendString(buf, id.Name) + if id.Scope != "" { + buf.WriteString(`,"scope":`) + jsonx.AppendString(buf, id.Scope) + } + buf.WriteByte('}') + buf.WriteByte(',') + + jsonx.AppendFloatArray(buf, + metric.data.countSatisfied, + metric.data.totalTolerated, + metric.data.exclusiveFailed, + metric.data.min, + metric.data.max, + metric.data.sumSquares) + + buf.WriteByte(']') + } + buf.WriteByte(']') + + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return mt.CollectorJSON(agentRunID, harvestStart) +} +func (mt *metricTable) MergeIntoHarvest(h *Harvest) { + h.Metrics.mergeFailed(mt) +} + +func (mt *metricTable) ApplyRules(rules metricRules) *metricTable { + if nil == rules { + return mt + } + if len(rules) == 0 { + return mt + } + + applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart) + cache := make(map[string]string) + + for id, m := range mt.metrics { + out, ok := cache[id.Name] + if !ok { + out = 
rules.Apply(id.Name) + cache[id.Name] = out + } + + if "" != out { + applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m) + } + } + + return applied +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metrics_test.go b/vendor/github.com/newrelic/go-agent/internal/metrics_test.go new file mode 100644 index 00000000..5f327026 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metrics_test.go @@ -0,0 +1,342 @@ +package internal + +import ( + "encoding/json" + "fmt" + "testing" + "time" +) + +var ( + start = time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + end = time.Date(2014, time.November, 28, 1, 2, 0, 0, time.UTC) +) + +func TestEmptyMetrics(t *testing.T) { + mt := newMetricTable(20, start) + js, err := mt.CollectorJSON(`12345`, end) + if nil != err { + t.Fatal(err) + } + if nil != js { + t.Error(string(js)) + } +} + +func isValidJSON(data []byte) error { + var v interface{} + + return json.Unmarshal(data, &v) +} + +func TestMetrics(t *testing.T) { + mt := newMetricTable(20, start) + + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("two", "my_scope", 4*time.Second, 2*time.Second, unforced) + mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + + mt.addApdex("apdex satisfied", "", 9*time.Second, ApdexSatisfying, unforced) + mt.addApdex("apdex satisfied", "", 8*time.Second, ApdexSatisfying, unforced) + mt.addApdex("apdex tolerated", "", 7*time.Second, ApdexTolerating, unforced) + mt.addApdex("apdex tolerated", "", 8*time.Second, ApdexTolerating, unforced) + mt.addApdex("apdex failed", "my_scope", 1*time.Second, ApdexFailing, unforced) + + mt.addCount("count 123", float64(123), unforced) + mt.addSingleCount("count 1", unforced) + + ExpectMetrics(t, mt, []WantMetric{ + {"apdex satisfied", "", false, []float64{2, 0, 0, 8, 9, 0}}, + {"apdex tolerated", "", false, []float64{0, 2, 0, 7, 8, 0}}, + {"one", 
"", false, []float64{2, 4, 2, 2, 2, 8}}, + {"apdex failed", "my_scope", false, []float64{0, 0, 1, 1, 1, 0}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "my_scope", false, []float64{1, 4, 2, 4, 4, 16}}, + {"count 123", "", false, []float64{123, 0, 0, 0, 0, 0}}, + {"count 1", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) + + js, err := mt.Data("12345", end) + if nil != err { + t.Error(err) + } + // The JSON metric order is not deterministic, so we merely test that it + // is valid JSON. + if err := isValidJSON(js); nil != err { + t.Error(err, string(js)) + } +} + +func TestApplyRules(t *testing.T) { + js := `[ + { + "ignore":false, + "each_segment":false, + "terminate_chain":true, + "replacement":"been_renamed", + "replace_all":false, + "match_expression":"one$", + "eval_order":1 + }, + { + "ignore":true, + "each_segment":false, + "terminate_chain":true, + "replace_all":false, + "match_expression":"ignore_me", + "eval_order":1 + }, + { + "ignore":false, + "each_segment":false, + "terminate_chain":true, + "replacement":"merge_me", + "replace_all":false, + "match_expression":"merge_me[0-9]+$", + "eval_order":1 + } + ]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "scope1", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "scope2", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "scope1", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("ignore_me", "scope2", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("merge_me1", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("merge_me2", "", 2*time.Second, 1*time.Second, unforced) + + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"been_renamed", "", 
false, []float64{1, 2, 1, 2, 2, 4}}, + {"been_renamed", "scope1", false, []float64{1, 2, 1, 2, 2, 4}}, + {"been_renamed", "scope2", false, []float64{1, 2, 1, 2, 2, 4}}, + {"merge_me", "", false, []float64{2, 4, 2, 2, 2, 8}}, + }) +} + +func TestApplyEmptyRules(t *testing.T) { + js := `[]` + var rules metricRules + err := json.Unmarshal([]byte(js), &rules) + if nil != err { + t.Fatal(err) + } + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestApplyNilRules(t *testing.T) { + var rules metricRules + + mt := newMetricTable(20, start) + mt.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + mt.addDuration("one", "my_scope", 2*time.Second, 1*time.Second, unforced) + applied := mt.ApplyRules(rules) + ExpectMetrics(t, applied, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"one", "my_scope", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestForced(t *testing.T) { + mt := newMetricTable(0, start) + + if mt.numDropped != 0 { + t.Fatal(mt.numDropped) + } + + mt.addDuration("unforced", "", 1*time.Second, 1*time.Second, unforced) + mt.addDuration("forced", "", 2*time.Second, 2*time.Second, forced) + + if mt.numDropped != 1 { + t.Fatal(mt.numDropped) + } + + ExpectMetrics(t, mt, []WantMetric{ + {"forced", "", true, []float64{1, 2, 2, 2, 2, 4}}, + }) + +} + +func TestMetricsMergeIntoEmpty(t *testing.T) { + src := newMetricTable(20, start) + src.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest := newMetricTable(20, start) + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 
2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMetricsMergeFromEmpty(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMetricsMerge(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + dest.merge(src, "") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{2, 4, 2, 2, 2, 8}}, + {"three", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMergeFailedSuccess(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, end) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + if 0 != dest.failedHarvests { + t.Fatal(dest.failedHarvests) + } + + dest.mergeFailed(src) + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{2, 4, 2, 2, 2, 8}}, + {"three", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func TestMergeFailedLimitReached(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, end) + dest.addDuration("one", "", 2*time.Second, 1*time.Second, 
unforced) + dest.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + src.addDuration("three", "", 2*time.Second, 1*time.Second, unforced) + + src.failedHarvests = failedMetricAttemptsLimit + + dest.mergeFailed(src) + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "", false, []float64{1, 2, 1, 2, 2, 4}}, + {"two", "", false, []float64{1, 2, 1, 2, 2, 4}}, + }) +} + +func BenchmarkMetricTableCollectorJSON(b *testing.B) { + mt := newMetricTable(2000, time.Now()) + md := metricData{ + countSatisfied: 1234567812345678.1234567812345678, + totalTolerated: 1234567812345678.1234567812345678, + exclusiveFailed: 1234567812345678.1234567812345678, + min: 1234567812345678.1234567812345678, + max: 1234567812345678.1234567812345678, + sumSquares: 1234567812345678.1234567812345678, + } + + for i := 0; i < 20; i++ { + scope := fmt.Sprintf("WebTransaction/Uri/myblog2/%d", i) + + for j := 0; j < 20; j++ { + name := fmt.Sprintf("Datastore/statement/MySQL/City%d/insert", j) + mt.add(name, "", md, forced) + mt.add(name, scope, md, forced) + + name = fmt.Sprintf("WebTransaction/Uri/myblog2/newPost_rum_%d.php", j) + mt.add(name, "", md, forced) + mt.add(name, scope, md, forced) + } + } + + data, err := mt.CollectorJSON("12345", time.Now()) + if nil != err { + b.Fatal(err) + } + if err := isValidJSON(data); nil != err { + b.Fatal(err, string(data)) + } + + b.ResetTimer() + b.ReportAllocs() + + id := "12345" + now := time.Now() + for i := 0; i < b.N; i++ { + mt.CollectorJSON(id, now) + } +} + +func BenchmarkAddingSameMetrics(b *testing.B) { + name := "my_name" + scope := "my_scope" + duration := 2 * time.Second + exclusive := 1 * time.Second + + mt := newMetricTable(2000, time.Now()) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mt.addDuration(name, scope, duration, exclusive, forced) + mt.addSingleCount(name, forced) + } +} + +func TestMergedMetricsAreCopied(t *testing.T) { + src := 
newMetricTable(20, start) + dest := newMetricTable(20, start) + + src.addSingleCount("zip", unforced) + dest.merge(src, "") + src.addSingleCount("zip", unforced) + ExpectMetrics(t, dest, []WantMetric{ + {"zip", "", false, []float64{1, 0, 0, 0, 0, 0}}, + }) +} + +func TestMergedWithScope(t *testing.T) { + src := newMetricTable(20, start) + dest := newMetricTable(20, start) + + src.addSingleCount("one", unforced) + src.addDuration("two", "", 2*time.Second, 1*time.Second, unforced) + dest.addDuration("two", "my_scope", 2*time.Second, 1*time.Second, unforced) + dest.merge(src, "my_scope") + + ExpectMetrics(t, dest, []WantMetric{ + {"one", "my_scope", false, []float64{1, 0, 0, 0, 0, 0}}, + {"two", "my_scope", false, []float64{2, 4, 2, 2, 2, 8}}, + }) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/queuing.go b/vendor/github.com/newrelic/go-agent/internal/queuing.go new file mode 100644 index 00000000..cc361f82 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/queuing.go @@ -0,0 +1,72 @@ +package internal + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +const ( + xRequestStart = "X-Request-Start" + xQueueStart = "X-Queue-Start" +) + +var ( + earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() + latestAcceptableSeconds = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() +) + +func checkQueueTimeSeconds(secondsFloat float64) time.Time { + seconds := int64(secondsFloat) + nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0)) + if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds { + return time.Unix(seconds, nanos) + } + return time.Time{} +} + +func parseQueueTime(s string) time.Time { + f, err := strconv.ParseFloat(s, 64) + if nil != err { + return time.Time{} + } + if f <= 0 { + return time.Time{} + } + + // try microseconds + if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() { + return t + } + // try milliseconds + 
if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() { + return t + } + // try seconds + if t := checkQueueTimeSeconds(f); !t.IsZero() { + return t + } + return time.Time{} +} + +// QueueDuration TODO +func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration { + s := hdr.Get(xQueueStart) + if "" == s { + s = hdr.Get(xRequestStart) + } + if "" == s { + return 0 + } + + s = strings.TrimPrefix(s, "t=") + qt := parseQueueTime(s) + if qt.IsZero() { + return 0 + } + if qt.After(txnStart) { + return 0 + } + return txnStart.Sub(qt) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/queuing_test.go b/vendor/github.com/newrelic/go-agent/internal/queuing_test.go new file mode 100644 index 00000000..54baf3f6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/queuing_test.go @@ -0,0 +1,93 @@ +package internal + +import ( + "net/http" + "testing" + "time" +) + +func TestParseQueueTime(t *testing.T) { + badInput := []string{ + "", + "nope", + "t", + "0", + "0.0", + "9999999999999999999999999999999999999999999999999", + "-1368811467146000", + "3000000000", + "3000000000000", + "900000000", + "900000000000", + } + for _, s := range badInput { + if qt := parseQueueTime(s); !qt.IsZero() { + t.Error(s, qt) + } + } + + testcases := []struct { + input string + expect int64 + }{ + // Microseconds + {"1368811467146000", 1368811467}, + // Milliseconds + {"1368811467146.000", 1368811467}, + {"1368811467146", 1368811467}, + // Seconds + {"1368811467.146000", 1368811467}, + {"1368811467.146", 1368811467}, + {"1368811467", 1368811467}, + } + for _, tc := range testcases { + qt := parseQueueTime(tc.input) + if qt.Unix() != tc.expect { + t.Error(tc.input, tc.expect, qt, qt.UnixNano()) + } + } +} + +func TestQueueDuration(t *testing.T) { + hdr := make(http.Header) + hdr.Set("X-Queue-Start", "1465798814") + qd := QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 2*time.Second { + t.Error(qd) + } + + hdr = make(http.Header) + hdr.Set("X-Request-Start", 
"1465798814") + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 2*time.Second { + t.Error(qd) + } + + hdr = make(http.Header) + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 0 { + t.Error(qd) + } + + hdr = make(http.Header) + hdr.Set("X-Request-Start", "invalid-time") + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 0 { + t.Error(qd) + } + + hdr = make(http.Header) + hdr.Set("X-Queue-Start", "t=1465798814") + qd = QueueDuration(hdr, time.Unix(1465798816, 0)) + if qd != 2*time.Second { + t.Error(qd) + } + + // incorrect time order + hdr = make(http.Header) + hdr.Set("X-Queue-Start", "t=1465798816") + qd = QueueDuration(hdr, time.Unix(1465798814, 0)) + if qd != 0 { + t.Error(qd) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sampler.go b/vendor/github.com/newrelic/go-agent/internal/sampler.go new file mode 100644 index 00000000..d78cdc64 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sampler.go @@ -0,0 +1,145 @@ +package internal + +import ( + "runtime" + "time" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +// Sample is a system/runtime snapshot. +type Sample struct { + when time.Time + memStats runtime.MemStats + usage sysinfo.Usage + numGoroutine int + numCPU int +} + +func bytesToMebibytesFloat(bts uint64) float64 { + return float64(bts) / (1024 * 1024) +} + +// GetSample gathers a new Sample. +func GetSample(now time.Time, lg logger.Logger) *Sample { + s := Sample{ + when: now, + numGoroutine: runtime.NumGoroutine(), + numCPU: runtime.NumCPU(), + } + + if usage, err := sysinfo.GetUsage(); err == nil { + s.usage = usage + } else { + lg.Warn("unable to usage", map[string]interface{}{ + "error": err.Error(), + }) + } + + runtime.ReadMemStats(&s.memStats) + + return &s +} + +type cpuStats struct { + used time.Duration + fraction float64 // used / (elapsed * numCPU) +} + +// Stats contains system information for a period of time. 
+type Stats struct { + numGoroutine int + allocBytes uint64 + heapObjects uint64 + user cpuStats + system cpuStats + gcPauseFraction float64 + deltaNumGC uint32 + deltaPauseTotal time.Duration + minPause time.Duration + maxPause time.Duration +} + +// Samples is used as the parameter to GetStats to avoid mixing up the previous +// and current sample. +type Samples struct { + Previous *Sample + Current *Sample +} + +// GetStats combines two Samples into a Stats. +func GetStats(ss Samples) Stats { + cur := ss.Current + prev := ss.Previous + elapsed := cur.when.Sub(prev.when) + + s := Stats{ + numGoroutine: cur.numGoroutine, + allocBytes: cur.memStats.Alloc, + heapObjects: cur.memStats.HeapObjects, + } + + // CPU Utilization + totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU) + if prev.usage.User != 0 && cur.usage.User > prev.usage.User { + s.user.used = cur.usage.User - prev.usage.User + s.user.fraction = s.user.used.Seconds() / totalCPUSeconds + } + if prev.usage.System != 0 && cur.usage.System > prev.usage.System { + s.system.used = cur.usage.System - prev.usage.System + s.system.fraction = s.system.used.Seconds() / totalCPUSeconds + } + + // GC Pause Fraction + deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs + frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds()) + s.gcPauseFraction = frac + + // GC Pauses + if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 { + // In case more than 256 pauses have happened between samples + // and we are examining a subset of the pauses, we ensure that + // the min and max are not on the same side of the average by + // using the average as the starting min and max. 
+ maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC) + for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ { + pause := cur.memStats.PauseNs[(i+255)%256] + if pause > maxPauseNs { + maxPauseNs = pause + } + if pause < minPauseNs { + minPauseNs = pause + } + } + s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond + s.deltaNumGC = deltaNumGC + s.minPause = time.Duration(minPauseNs) * time.Nanosecond + s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond + } + + return s +} + +// MergeIntoHarvest implements Harvestable. +func (s Stats) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced) + h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced) + h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced) + h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced) + h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced) + h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced) + h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced) + h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced) + if s.deltaNumGC > 0 { + h.Metrics.add(gcPauses, "", metricData{ + countSatisfied: float64(s.deltaNumGC), + totalTolerated: s.deltaPauseTotal.Seconds(), + exclusiveFailed: 0, + min: s.minPause.Seconds(), + max: s.maxPause.Seconds(), + sumSquares: s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(), + }, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sampler_test.go b/vendor/github.com/newrelic/go-agent/internal/sampler_test.go new file mode 100644 index 00000000..235c1bd2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sampler_test.go @@ -0,0 +1,85 @@ +package internal + +import ( + "testing" + "time" + + 
"github.com/newrelic/go-agent/internal/logger" +) + +func TestGetSample(t *testing.T) { + now := time.Now() + sample := GetSample(now, logger.ShimLogger{}) + if nil == sample { + t.Fatal(sample) + } + if now != sample.when { + t.Error(now, sample.when) + } + if sample.numGoroutine <= 0 { + t.Error(sample.numGoroutine) + } + if sample.numCPU <= 0 { + t.Error(sample.numCPU) + } + if sample.memStats.HeapObjects == 0 { + t.Error(sample.memStats.HeapObjects) + } +} + +func TestMetricsCreated(t *testing.T) { + now := time.Now() + h := NewHarvest(now) + + stats := Stats{ + heapObjects: 5 * 1000, + numGoroutine: 23, + allocBytes: 37 * 1024 * 1024, + user: cpuStats{ + used: 20 * time.Millisecond, + fraction: 0.01, + }, + system: cpuStats{ + used: 40 * time.Millisecond, + fraction: 0.02, + }, + gcPauseFraction: 3e-05, + deltaNumGC: 2, + deltaPauseTotal: 500 * time.Microsecond, + minPause: 100 * time.Microsecond, + maxPause: 400 * time.Microsecond, + } + + stats.MergeIntoHarvest(h) + + ExpectMetrics(t, h.Metrics, []WantMetric{ + {"Memory/Heap/AllocatedObjects", "", true, []float64{1, 5000, 5000, 5000, 5000, 25000000}}, + {"Memory/Physical", "", true, []float64{1, 37, 0, 37, 37, 1369}}, + {"CPU/User Time", "", true, []float64{1, 0.02, 0.02, 0.02, 0.02, 0.0004}}, + {"CPU/System Time", "", true, []float64{1, 0.04, 0.04, 0.04, 0.04, 0.0016}}, + {"CPU/User/Utilization", "", true, []float64{1, 0.01, 0, 0.01, 0.01, 0.0001}}, + {"CPU/System/Utilization", "", true, []float64{1, 0.02, 0, 0.02, 0.02, 0.0004}}, + {"Go/Runtime/Goroutines", "", true, []float64{1, 23, 23, 23, 23, 529}}, + {"GC/System/Pause Fraction", "", true, []float64{1, 3e-05, 0, 3e-05, 3e-05, 9e-10}}, + {"GC/System/Pauses", "", true, []float64{2, 0.0005, 0, 0.0001, 0.0004, 2.5e-7}}, + }) +} + +func TestMetricsCreatedEmpty(t *testing.T) { + now := time.Now() + h := NewHarvest(now) + stats := Stats{} + + stats.MergeIntoHarvest(h) + + ExpectMetrics(t, h.Metrics, []WantMetric{ + {"Memory/Heap/AllocatedObjects", "", true, 
[]float64{1, 0, 0, 0, 0, 0}}, + {"Memory/Physical", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/User Time", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/System Time", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/User/Utilization", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"CPU/System/Utilization", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"Go/Runtime/Goroutines", "", true, []float64{1, 0, 0, 0, 0, 0}}, + {"GC/System/Pause Fraction", "", true, []float64{1, 0, 0, 0, 0, 0}}, + }) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/segment_terms.go b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go new file mode 100644 index 00000000..a0fd1f2e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go @@ -0,0 +1,145 @@ +package internal + +// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules + +import ( + "encoding/json" + "strings" +) + +const ( + placeholder = "*" + separator = "/" +) + +type segmentRule struct { + Prefix string `json:"prefix"` + Terms []string `json:"terms"` + TermsMap map[string]struct{} +} + +// segmentRules is keyed by each segmentRule's Prefix field with any trailing +// slash removed. 
+type segmentRules map[string]*segmentRule + +func buildTermsMap(terms []string) map[string]struct{} { + m := make(map[string]struct{}, len(terms)) + for _, t := range terms { + m[t] = struct{}{} + } + return m +} + +func (rules *segmentRules) UnmarshalJSON(b []byte) error { + var raw []*segmentRule + + if err := json.Unmarshal(b, &raw); nil != err { + return err + } + + rs := make(map[string]*segmentRule) + + for _, rule := range raw { + prefix := strings.TrimSuffix(rule.Prefix, "/") + if len(strings.Split(prefix, "/")) != 2 { + // TODO + // Warn("invalid segment term rule prefix", + // {"prefix": rule.Prefix}) + continue + } + + if nil == rule.Terms { + // TODO + // Warn("segment term rule has missing terms", + // {"prefix": rule.Prefix}) + continue + } + + rule.TermsMap = buildTermsMap(rule.Terms) + + rs[prefix] = rule + } + + *rules = rs + return nil +} + +func (rule *segmentRule) apply(name string) string { + if !strings.HasPrefix(name, rule.Prefix) { + return name + } + + s := strings.TrimPrefix(name, rule.Prefix) + + leadingSlash := "" + if strings.HasPrefix(s, separator) { + leadingSlash = separator + s = strings.TrimPrefix(s, separator) + } + + if "" != s { + segments := strings.Split(s, separator) + + for i, segment := range segments { + _, whitelisted := rule.TermsMap[segment] + if whitelisted { + segments[i] = segment + } else { + segments[i] = placeholder + } + } + + segments = collapsePlaceholders(segments) + s = strings.Join(segments, separator) + } + + return rule.Prefix + leadingSlash + s +} + +func (rules segmentRules) apply(name string) string { + if nil == rules { + return name + } + + rule, ok := rules[firstTwoSegments(name)] + if !ok { + return name + } + + return rule.apply(name) +} + +func firstTwoSegments(name string) string { + firstSlashIdx := strings.Index(name, separator) + if firstSlashIdx == -1 { + return name + } + + secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator) + if secondSlashIdx == -1 { + return name + } + + 
return name[0 : firstSlashIdx+secondSlashIdx+1] +} + +func collapsePlaceholders(segments []string) []string { + j := 0 + prevStar := false + for i := 0; i < len(segments); i++ { + segment := segments[i] + if placeholder == segment { + if prevStar { + continue + } + segments[j] = placeholder + j++ + prevStar = true + } else { + segments[j] = segment + j++ + prevStar = false + } + } + return segments[0:j] +} diff --git a/vendor/github.com/newrelic/go-agent/internal/segment_terms_test.go b/vendor/github.com/newrelic/go-agent/internal/segment_terms_test.go new file mode 100644 index 00000000..2e40812f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/segment_terms_test.go @@ -0,0 +1,134 @@ +package internal + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestCrossAgentSegmentTerms(t *testing.T) { + var tcs []struct { + Testname string `json:"testname"` + Rules segmentRules `json:"transaction_segment_terms"` + Tests []struct { + Input string `json:"input"` + Expected string `json:"expected"` + } `json:"tests"` + } + + err := crossagent.ReadJSON("transaction_segment_terms.json", &tcs) + if err != nil { + t.Fatal(err) + } + + for _, tc := range tcs { + for _, test := range tc.Tests { + out := tc.Rules.apply(test.Input) + if out != test.Expected { + t.Fatal(tc.Testname, test.Input, out, test.Expected) + } + } + } +} + +func TestSegmentTerms(t *testing.T) { + js := `[ + { + "prefix":"WebTransaction\/Uri", + "terms":[ + "two", + "Users", + "willhf", + "dev", + "php", + "one", + "alpha", + "zap" + ] + } + ]` + var rules segmentRules + if err := json.Unmarshal([]byte(js), &rules); nil != err { + t.Fatal(err) + } + + out := rules.apply("WebTransaction/Uri/pen/two/pencil/dev/paper") + if out != "WebTransaction/Uri/*/two/*/dev/*" { + t.Fatal(out) + } +} + +func TestEmptySegmentTerms(t *testing.T) { + var rules segmentRules + + input := "my/name" + out := rules.apply(input) + if out != input { + 
t.Error(input, out) + } +} + +func BenchmarkSegmentTerms(b *testing.B) { + js := `[ + { + "prefix":"WebTransaction\/Uri", + "terms":[ + "two", + "Users", + "willhf", + "dev", + "php", + "one", + "alpha", + "zap" + ] + } + ]` + var rules segmentRules + if err := json.Unmarshal([]byte(js), &rules); nil != err { + b.Fatal(err) + } + + b.ResetTimer() + b.ReportAllocs() + + input := "WebTransaction/Uri/pen/two/pencil/dev/paper" + expected := "WebTransaction/Uri/*/two/*/dev/*" + for i := 0; i < b.N; i++ { + out := rules.apply(input) + if out != expected { + b.Fatal(out, expected) + } + } +} + +func TestCollapsePlaceholders(t *testing.T) { + testcases := []struct { + input string + expect string + }{ + {input: "", expect: ""}, + {input: "/", expect: "/"}, + {input: "*", expect: "*"}, + {input: "*/*", expect: "*"}, + {input: "a/b/c", expect: "a/b/c"}, + {input: "*/*/*", expect: "*"}, + {input: "a/*/*/*/b", expect: "a/*/b"}, + {input: "a/b/*/*/*/", expect: "a/b/*/"}, + {input: "a/b/*/*/*", expect: "a/b/*"}, + {input: "*/*/a/b/*/*/*", expect: "*/a/b/*"}, + {input: "*/*/a/b/*/c/*/*/d/e/*/*/*", expect: "*/a/b/*/c/*/d/e/*"}, + {input: "a/*/b", expect: "a/*/b"}, + } + + for _, tc := range testcases { + segments := strings.Split(tc.input, "/") + segments = collapsePlaceholders(segments) + out := strings.Join(segments, "/") + if out != tc.expect { + t.Error(tc.input, tc.expect, out) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/slow_queries.go b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go new file mode 100644 index 00000000..5a687bd6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go @@ -0,0 +1,254 @@ +package internal + +import ( + "bytes" + "container/heap" + "hash/fnv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type queryParameters map[string]interface{} + +func vetQueryParameters(params map[string]interface{}) queryParameters { + if nil == params { + return nil + } + // Copying the 
parameters into a new map is safer than modifying the map + // from the customer. + vetted := make(map[string]interface{}) + for key, val := range params { + if err := validAttributeKey(key); nil != err { + continue + } + val = truncateStringValueIfLongInterface(val) + if err := valueIsValid(val); nil != err { + continue + } + vetted[key] = val + } + return queryParameters(vetted) +} + +func (q queryParameters) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('{') + w := jsonFieldsWriter{buf: buf} + for key, val := range q { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') +} + +// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md + +// slowQueryInstance represents a single datastore call. +type slowQueryInstance struct { + // Fields populated right after the datastore segment finishes: + + Duration time.Duration + DatastoreMetric string + ParameterizedQuery string + QueryParameters queryParameters + Host string + PortPathOrID string + DatabaseName string + StackTrace StackTrace + + // Fields populated when merging into the harvest: + + TxnName string + TxnURL string +} + +// Aggregation is performed to avoid reporting multiple slow queries with same +// query string. Since some datastore segments may be below the slow query +// threshold, the aggregation fields Count, Total, and Min should be taken with +// a grain of salt. +type slowQuery struct { + Count int32 // number of times the query has been observed + Total time.Duration // cummulative duration + Min time.Duration // minimum observed duration + + // When Count > 1, slowQueryInstance contains values from the slowest + // observation. 
+ slowQueryInstance +} + +type slowQueries struct { + priorityQueue []*slowQuery + // lookup maps query strings to indices in the priorityQueue + lookup map[string]int +} + +func (slows *slowQueries) Len() int { + return len(slows.priorityQueue) +} +func (slows *slowQueries) Less(i, j int) bool { + pq := slows.priorityQueue + return pq[i].Duration < pq[j].Duration +} +func (slows *slowQueries) Swap(i, j int) { + pq := slows.priorityQueue + si := pq[i] + sj := pq[j] + pq[i], pq[j] = pq[j], pq[i] + slows.lookup[si.ParameterizedQuery] = j + slows.lookup[sj.ParameterizedQuery] = i +} + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (slows *slowQueries) Push(x interface{}) {} +func (slows *slowQueries) Pop() interface{} { return nil } + +func newSlowQueries(max int) *slowQueries { + return &slowQueries{ + lookup: make(map[string]int, max), + priorityQueue: make([]*slowQuery, 0, max), + } +} + +// Merge is used to merge slow queries from the transaction into the harvest. +func (slows *slowQueries) Merge(other *slowQueries, txnName, txnURL string) { + for _, s := range other.priorityQueue { + cp := *s + cp.TxnName = txnName + cp.TxnURL = txnURL + slows.observe(cp) + } +} + +// merge aggregates the observations from two slow queries with the same Query. 
+func (slow *slowQuery) merge(other slowQuery) { + slow.Count += other.Count + slow.Total += other.Total + + if other.Min < slow.Min { + slow.Min = other.Min + } + if other.Duration > slow.Duration { + slow.slowQueryInstance = other.slowQueryInstance + } +} + +func (slows *slowQueries) observeInstance(slow slowQueryInstance) { + slows.observe(slowQuery{ + Count: 1, + Total: slow.Duration, + Min: slow.Duration, + slowQueryInstance: slow, + }) +} + +func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) { + cpy := new(slowQuery) + *cpy = slow + slows.priorityQueue[idx] = cpy + slows.lookup[slow.ParameterizedQuery] = idx + heap.Fix(slows, idx) +} + +func (slows *slowQueries) observe(slow slowQuery) { + // Has the query has previously been observed? + if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok { + slows.priorityQueue[idx].merge(slow) + heap.Fix(slows, idx) + return + } + // Has the collection reached max capacity? + if len(slows.priorityQueue) < cap(slows.priorityQueue) { + idx := len(slows.priorityQueue) + slows.priorityQueue = slows.priorityQueue[0 : idx+1] + slows.insertAtIndex(slow, idx) + return + } + // Is this query slower than the existing fastest? + fastest := slows.priorityQueue[0] + if slow.Duration > fastest.Duration { + delete(slows.lookup, fastest.ParameterizedQuery) + slows.insertAtIndex(slow, 0) + return + } +} + +// The third element of the slow query JSON should be a hash of the query +// string. This hash may be used by backend services to aggregate queries which +// have the have the same query string. It is unknown if this actually used. 
+func makeSlowQueryID(query string) uint32 { + h := fnv.New32a() + h.Write([]byte(query)) + return h.Sum32() +} + +func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendString(buf, slow.TxnName) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.TxnURL) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery))) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.ParameterizedQuery) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.DatastoreMetric) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(slow.Count)) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0) + buf.WriteByte(',') + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + if "" != slow.Host { + w.stringField("host", slow.Host) + } + if "" != slow.PortPathOrID { + w.stringField("port_path_or_id", slow.PortPathOrID) + } + if "" != slow.DatabaseName { + w.stringField("database_name", slow.DatabaseName) + } + if nil != slow.StackTrace { + w.writerField("backtrace", slow.StackTrace) + } + if nil != slow.QueryParameters { + w.writerField("query_parameters", slow.QueryParameters) + } + buf.WriteByte('}') + buf.WriteByte(']') +} + +// WriteJSON marshals the collection of slow queries into JSON according to the +// schema expected by the collector. +// +// Note: This JSON does not contain the agentRunID. This is for unknown +// historical reasons. Since the agentRunID is included in the url, +// its use in the other commands' JSON is redundant (although required). 
+func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + buf.WriteByte('[') + for idx, s := range slows.priorityQueue { + if idx > 0 { + buf.WriteByte(',') + } + s.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') +} + +func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(slows.priorityQueue) { + return nil, nil + } + estimate := 1024 * len(slows.priorityQueue) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + slows.WriteJSON(buf) + return buf.Bytes(), nil +} + +func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) { +} diff --git a/vendor/github.com/newrelic/go-agent/internal/slow_queries_test.go b/vendor/github.com/newrelic/go-agent/internal/slow_queries_test.go new file mode 100644 index 00000000..db163c3a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/slow_queries_test.go @@ -0,0 +1,117 @@ +package internal + +import ( + "math/rand" + "strconv" + "strings" + "testing" + "time" +) + +func TestEmptySlowQueriesData(t *testing.T) { + slows := newSlowQueries(maxHarvestSlowSQLs) + js, err := slows.Data("agentRunID", time.Now()) + if nil != js || nil != err { + t.Error(string(js), err) + } +} + +func TestSlowQueriesBasic(t *testing.T) { + txnSlows := newSlowQueries(maxTxnSlowQueries) + txnSlows.observeInstance(slowQueryInstance{ + Duration: 2 * time.Second, + DatastoreMetric: "Datastore/statement/MySQL/users/INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + PortPathOrID: "3306", + DatabaseName: "production", + StackTrace: nil, + QueryParameters: vetQueryParameters(map[string]interface{}{ + strings.Repeat("X", attributeKeyLengthLimit+1): "invalid-key", + "invalid-value": struct{}{}, + "valid": 123, + }), + }) + harvestSlows := newSlowQueries(maxHarvestSlowSQLs) + harvestSlows.Merge(txnSlows, "WebTransaction/Go/hello", "/zip/zap") + js, err := harvestSlows.Data("agentRunID", time.Now()) + expect := 
CompactJSONString(`[[ + [ + "WebTransaction/Go/hello", + "/zip/zap", + 3722056893, + "INSERT INTO users (name, age) VALUES ($1, $2)", + "Datastore/statement/MySQL/users/INSERT", + 1, + 2000, + 2000, + 2000, + { + "host":"db-server-1", + "port_path_or_id":"3306", + "database_name":"production", + "query_parameters":{ + "valid":123 + } + } + ] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestSlowQueriesAggregation(t *testing.T) { + max := 50 + slows := make([]slowQueryInstance, 3*max) + for i := 0; i < max; i++ { + num := i + 1 + str := strconv.Itoa(num) + duration := time.Duration(num) * time.Second + slow := slowQueryInstance{ + DatastoreMetric: "Datastore/" + str, + ParameterizedQuery: str, + } + slow.Duration = duration + slow.TxnName = "Txn/0" + str + slow.TxnURL = "/0" + str + slows[i*3+0] = slow + slow.Duration = duration + (100 * time.Second) + slow.TxnName = "Txn/1" + str + slow.TxnURL = "/1" + str + slows[i*3+1] = slow + slow.Duration = duration + (200 * time.Second) + slow.TxnName = "Txn/2" + str + slow.TxnURL = "/2" + str + slows[i*3+2] = slow + } + sq := newSlowQueries(10) + seed := int64(99) // arbitrary fixed seed + r := rand.New(rand.NewSource(seed)) + perm := r.Perm(max * 3) + for _, idx := range perm { + sq.observeInstance(slows[idx]) + } + js, err := sq.Data("agentRunID", time.Now()) + expect := CompactJSONString(`[[ + ["Txn/241","/241",2296612630,"41","Datastore/41",1,241000,241000,241000,{}], + ["Txn/242","/242",2279835011,"42","Datastore/42",2,384000,142000,242000,{}], + ["Txn/243","/243",2263057392,"43","Datastore/43",2,386000,143000,243000,{}], + ["Txn/244","/244",2380500725,"44","Datastore/44",3,432000,44000,244000,{}], + ["Txn/247","/247",2330167868,"47","Datastore/47",2,394000,147000,247000,{}], + ["Txn/245","/245",2363723106,"45","Datastore/45",2,290000,45000,245000,{}], + ["Txn/250","/250",2212577440,"50","Datastore/50",1,250000,250000,250000,{}], + 
["Txn/246","/246",2346945487,"46","Datastore/46",2,392000,146000,246000,{}], + ["Txn/249","/249",2430833582,"49","Datastore/49",3,447000,49000,249000,{}], + ["Txn/248","/248",2447611201,"48","Datastore/48",3,444000,48000,248000,{}] +]]`) + if nil != err { + t.Error(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/stacktrace.go b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go new file mode 100644 index 00000000..7ca27508 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go @@ -0,0 +1,81 @@ +package internal + +import ( + "bytes" + "path" + "runtime" +) + +// StackTrace is a stack trace. +type StackTrace []uintptr + +// GetStackTrace returns a new StackTrace. +func GetStackTrace(skipFrames int) StackTrace { + skip := 2 // skips runtime.Callers and this function + skip += skipFrames + + callers := make([]uintptr, maxStackTraceFrames) + written := runtime.Callers(skip, callers) + return StackTrace(callers[0:written]) +} + +func pcToFunc(pc uintptr) (*runtime.Func, uintptr) { + // The Golang runtime package documentation says "To look up the file + // and line number of the call itself, use pc[i]-1. As an exception to + // this rule, if pc[i-1] corresponds to the function runtime.sigpanic, + // then pc[i] is the program counter of a faulting instruction and + // should be used without any subtraction." + // + // TODO: Fully understand when this subtraction is necessary. + place := pc - 1 + return runtime.FuncForPC(place), place +} + +func topCallerNameBase(st StackTrace) string { + f, _ := pcToFunc(st[0]) + if nil == f { + return "" + } + return path.Base(f.Name()) +} + +// WriteJSON adds the stack trace to the buffer in the JSON form expected by the +// collector. 
+func (st StackTrace) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + for i, pc := range st { + // Stack traces may be provided by the customer, and therefore + // may be excessively long. The truncation is done here to + // facilitate testing. + if i >= maxStackTraceFrames { + break + } + if i > 0 { + buf.WriteByte(',') + } + // Implements the format documented here: + // https://source.datanerd.us/agents/agent-specs/blob/master/Stack-Traces.md + buf.WriteByte('{') + if f, place := pcToFunc(pc); nil != f { + name := path.Base(f.Name()) + file, line := f.FileLine(place) + + w := jsonFieldsWriter{buf: buf} + w.stringField("filepath", file) + w.stringField("name", name) + w.intField("line", int64(line)) + } + buf.WriteByte('}') + } + buf.WriteByte(']') +} + +// MarshalJSON prepares JSON in the format expected by the collector. +func (st StackTrace) MarshalJSON() ([]byte, error) { + estimate := 256 * len(st) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + st.WriteJSON(buf) + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/stacktrace_test.go b/vendor/github.com/newrelic/go-agent/internal/stacktrace_test.go new file mode 100644 index 00000000..31e4b3af --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/stacktrace_test.go @@ -0,0 +1,40 @@ +package internal + +import ( + "encoding/json" + "testing" +) + +func TestGetStackTrace(t *testing.T) { + stack := GetStackTrace(0) + js, err := json.Marshal(stack) + if nil != err { + t.Fatal(err) + } + if nil == js { + t.Fatal(string(js)) + } +} + +func TestLongStackTrace(t *testing.T) { + st := StackTrace(make([]uintptr, maxStackTraceFrames+20)) + js, err := json.Marshal(st) + if nil != err { + t.Fatal(err) + } + expect := `[ + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + 
{},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{}, + {},{},{},{},{},{},{},{},{},{} + ]` + if string(js) != CompactJSONString(expect) { + t.Error(string(js)) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go new file mode 100644 index 00000000..f031c76d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go @@ -0,0 +1,124 @@ +package sysinfo + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" +) + +var ( + // ErrDockerUnsupported is returned if Docker is not supported on the + // platform. + ErrDockerUnsupported = errors.New("Docker unsupported on this platform") + // ErrDockerNotFound is returned if a Docker ID is not found in + // /proc/self/cgroup + ErrDockerNotFound = errors.New("Docker ID not found") +) + +// DockerID attempts to detect Docker. +func DockerID() (string, error) { + if "linux" != runtime.GOOS { + return "", ErrDockerUnsupported + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return parseDockerID(f) +} + +var ( + dockerIDLength = 64 + dockerIDRegexRaw = fmt.Sprintf("^[0-9a-f]{%d}$", dockerIDLength) + dockerIDRegex = regexp.MustCompile(dockerIDRegexRaw) +) + +func parseDockerID(r io.Reader) (string, error) { + // Each line in the cgroup file consists of three colon delimited fields. + // 1. hierarchy ID - we don't care about this + // 2. subsystems - comma separated list of cgroup subsystem names + // 3. control group - control group to which the process belongs + // + // Example + // 5:cpuacct,cpu,cpuset:/daemons + + for scanner := bufio.NewScanner(r); scanner.Scan(); { + line := scanner.Bytes() + cols := bytes.SplitN(line, []byte(":"), 3) + + if len(cols) < 3 { + continue + } + + // We're only interested in the cpu subsystem. 
+ if !isCPUCol(cols[1]) { + continue + } + + // We're only interested in Docker generated cgroups. + // Reference Implementation: + // case cpu_cgroup + // # docker native driver w/out systemd (fs) + // when %r{^/docker/([0-9a-f]+)$} then $1 + // # docker native driver with systemd + // when %r{^/system\.slice/docker-([0-9a-f]+)\.scope$} then $1 + // # docker lxc driver + // when %r{^/lxc/([0-9a-f]+)$} then $1 + // + var id string + if bytes.HasPrefix(cols[2], []byte("/docker/")) { + id = string(cols[2][len("/docker/"):]) + } else if bytes.HasPrefix(cols[2], []byte("/lxc/")) { + id = string(cols[2][len("/lxc/"):]) + } else if bytes.HasPrefix(cols[2], []byte("/system.slice/docker-")) && + bytes.HasSuffix(cols[2], []byte(".scope")) { + id = string(cols[2][len("/system.slice/docker-") : len(cols[2])-len(".scope")]) + } else { + continue + } + + if err := validateDockerID(id); err != nil { + // We can stop searching at this point, the CPU + // subsystem should only occur once, and its cgroup is + // not docker or not a format we accept. 
+ return "", err + } + return id, nil + } + + return "", ErrDockerNotFound +} + +func isCPUCol(col []byte) bool { + // Sometimes we have multiple subsystems in one line, as in this example + // from: + // https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt + // + // 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope + splitCSV := func(r rune) bool { return r == ',' } + subsysCPU := []byte("cpu") + + for _, subsys := range bytes.FieldsFunc(col, splitCSV) { + if bytes.Equal(subsysCPU, subsys) { + return true + } + } + return false +} + +func validateDockerID(id string) error { + if !dockerIDRegex.MatchString(id) { + return fmt.Errorf("%s does not match %s", + id, dockerIDRegexRaw) + } + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker_test.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker_test.go new file mode 100644 index 00000000..e8dd6c4a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker_test.go @@ -0,0 +1,51 @@ +package sysinfo + +import ( + "bytes" + "path/filepath" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestDockerIDCrossAgent(t *testing.T) { + var testCases []struct { + File string `json:"filename"` + ID string `json:"containerId"` + } + + dir := "docker_container_id" + err := crossagent.ReadJSON(filepath.Join(dir, "cases.json"), &testCases) + if err != nil { + t.Fatal(err) + } + + for _, test := range testCases { + file := filepath.Join(dir, test.File) + input, err := crossagent.ReadFile(file) + if err != nil { + t.Error(err) + continue + } + + got, _ := parseDockerID(bytes.NewReader(input)) + if got != test.ID { + t.Error(got, test.ID) + } + } +} + +func TestDockerIDValidation(t *testing.T) { + err := validateDockerID("baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1239") + if nil != err { + 
t.Error("Validation should pass with a 64-character hex string.") + } + err = validateDockerID("39ffbba") + if nil == err { + t.Error("Validation should have failed with short string.") + } + err = validateDockerID("z000000000000000000000000000000000000000000000000100000000000000") + if nil == err { + t.Error("Validation should have failed with non-hex characters.") + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go new file mode 100644 index 00000000..ccef4fca --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go @@ -0,0 +1,10 @@ +// +build !linux + +package sysinfo + +import "os" + +// Hostname returns the host name. +func Hostname() (string, error) { + return os.Hostname() +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go new file mode 100644 index 00000000..e2300854 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go @@ -0,0 +1,50 @@ +package sysinfo + +import ( + "os" + "syscall" +) + +// Hostname returns the host name. +func Hostname() (string, error) { + // Try the builtin API first, which is designed to match the output of + // /bin/hostname, and fallback to uname(2) if that fails to match the + // behavior of gethostname(2) as implemented by glibc. On Linux, all + // these method should result in the same value because sethostname(2) + // limits the hostname to 64 bytes, the same size of the nodename field + // returned by uname(2). Note that is correspondence is not true on + // other platforms. + // + // os.Hostname failures should be exceedingly rare, however some systems + // configure SELinux to deny read access to /proc/sys/kernel/hostname. + // Redhat's OpenShift platform for example. 
os.Hostname can also fail if + // some or all of /proc has been hidden via chroot(2) or manipulation of + // the current processes' filesystem namespace via the cgroups APIs. + // Docker is an example of a tool that can configure such an + // environment. + name, err := os.Hostname() + if err == nil { + return name, nil + } + + var uts syscall.Utsname + if err2 := syscall.Uname(&uts); err2 != nil { + // The man page documents only one possible error for uname(2), + // suggesting that as long as the buffer given is valid, the + // call will never fail. Return the original error in the hope + // it provides more relevant information about why the hostname + // can't be retrieved. + return "", err + } + + // Convert Nodename to a Go string. + buf := make([]byte, 0, len(uts.Nodename)) + for _, c := range uts.Nodename { + if c == 0 { + break + } + buf = append(buf, byte(c)) + } + + return string(buf), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go new file mode 100644 index 00000000..0763ee30 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go @@ -0,0 +1,40 @@ +package sysinfo + +import ( + "bufio" + "errors" + "io" + "regexp" + "strconv" +) + +// BytesToMebibytes converts bytes into mebibytes. +func BytesToMebibytes(bts uint64) uint64 { + return bts / ((uint64)(1024 * 1024)) +} + +var ( + meminfoRe = regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`) + errMemTotalNotFound = errors.New("supported MemTotal not found in /proc/meminfo") +) + +// parseProcMeminfo is used to parse Linux's "/proc/meminfo". It is located +// here so that the relevant cross agent tests will be run on all platforms. 
+func parseProcMeminfo(f io.Reader) (uint64, error) { + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil { + kb, err := strconv.ParseUint(string(m[1]), 10, 64) + if err != nil { + return 0, err + } + return kb * 1024, nil + } + } + + err := scanner.Err() + if err == nil { + err = errMemTotalNotFound + } + return 0, err +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go new file mode 100644 index 00000000..3c40f42d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go @@ -0,0 +1,29 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + if bufLen != 8 { + return 0, syscall.EIO + } + + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin_test.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin_test.go new file mode 100644 index 00000000..3c928b5c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin_test.go @@ -0,0 +1,46 @@ +package sysinfo + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "testing" +) + +var re = regexp.MustCompile(`hw\.memsize:\s*(\d+)`) + +func darwinSysctlMemoryBytes() (uint64, error) { + out, err := exec.Command("/usr/sbin/sysctl", "hw.memsize").Output() + if err != nil { + return 0, err + } + + match := re.FindSubmatch(out) + if match 
== nil { + return 0, errors.New("memory size not found in sysctl output") + } + + bts, err := strconv.ParseUint(string(match[1]), 10, 64) + if err != nil { + return 0, err + } + + return bts, nil +} + +func TestPhysicalMemoryBytes(t *testing.T) { + mem, err := PhysicalMemoryBytes() + if err != nil { + t.Fatal(err) + } + + mem2, err := darwinSysctlMemoryBytes() + if nil != err { + t.Fatal(err) + } + + if mem != mem2 { + t.Error(mem, mem2) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go new file mode 100644 index 00000000..2e82320a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go @@ -0,0 +1,32 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + switch bufLen { + case 4: + return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil + case 8: + return *(*uint64)(unsafe.Pointer(&buf[0])), nil + default: + return 0, syscall.EIO + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd_test.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd_test.go new file mode 100644 index 00000000..64995302 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd_test.go @@ -0,0 +1,46 @@ +package sysinfo + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "testing" +) + +var re = regexp.MustCompile(`hw\.physmem:\s*(\d+)`) + +func freebsdSysctlMemoryBytes() (uint64, error) { 
+ out, err := exec.Command("/sbin/sysctl", "hw.physmem").Output() + if err != nil { + return 0, err + } + + match := re.FindSubmatch(out) + if match == nil { + return 0, errors.New("memory size not found in sysctl output") + } + + bts, err := strconv.ParseUint(string(match[1]), 10, 64) + if err != nil { + return 0, err + } + + return bts, nil +} + +func TestPhysicalMemoryBytes(t *testing.T) { + mem, err := PhysicalMemoryBytes() + if err != nil { + t.Fatal(err) + } + + mem2, err := freebsdSysctlMemoryBytes() + if nil != err { + t.Fatal(err) + } + + if mem != mem2 { + t.Error(mem, mem2) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go new file mode 100644 index 00000000..958e5699 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go @@ -0,0 +1,14 @@ +package sysinfo + +import "os" + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + f, err := os.Open("/proc/meminfo") + if err != nil { + return 0, err + } + defer f.Close() + + return parseProcMeminfo(f) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go new file mode 100644 index 00000000..4f1c818e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go @@ -0,0 +1,26 @@ +package sysinfo + +/* +#include +*/ +import "C" + +// PhysicalMemoryBytes returns the total amount of host memory. 
+func PhysicalMemoryBytes() (uint64, error) { + // The function we're calling on Solaris is + // long sysconf(int name); + var pages C.long + var pagesizeBytes C.long + var err error + + pagesizeBytes, err = C.sysconf(C._SC_PAGE_SIZE) + if pagesizeBytes < 1 { + return 0, err + } + pages, err = C.sysconf(C._SC_PHYS_PAGES) + if pages < 1 { + return 0, err + } + + return uint64(pages) * uint64(pagesizeBytes), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris_test.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris_test.go new file mode 100644 index 00000000..6df12824 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris_test.go @@ -0,0 +1,59 @@ +package sysinfo + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestPhysicalMemoryBytes(t *testing.T) { + prtconf, err := prtconfMemoryBytes() + if err != nil { + t.Fatal(err) + } + + sysconf, err := PhysicalMemoryBytes() + if err != nil { + t.Fatal(err) + } + + // The pagesize*pages calculation, although standard (the JVM, at least, + // uses this approach), doesn't match up exactly with the number + // returned by prtconf. 
+ if sysconf > prtconf || sysconf < (prtconf-prtconf/20) { + t.Fatal(prtconf, sysconf) + } +} + +var ( + ptrconfRe = regexp.MustCompile(`[Mm]emory\s*size:\s*([0-9]+)\s*([a-zA-Z]+)`) +) + +func prtconfMemoryBytes() (uint64, error) { + output, err := exec.Command("/usr/sbin/prtconf").Output() + if err != nil { + return 0, err + } + + m := ptrconfRe.FindSubmatch(output) + if m == nil { + return 0, errors.New("memory size not found in prtconf output") + } + + size, err := strconv.ParseUint(string(m[1]), 10, 64) + if err != nil { + return 0, err + } + + switch strings.ToLower(string(m[2])) { + case "megabytes", "mb": + return size * 1024 * 1024, nil + case "kilobytes", "kb": + return size * 1024, nil + default: + return 0, errors.New("couldn't parse memory size in prtconf output") + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_test.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_test.go new file mode 100644 index 00000000..fdf7866e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_test.go @@ -0,0 +1,53 @@ +package sysinfo + +import ( + "os" + "regexp" + "strconv" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestMemTotal(t *testing.T) { + var fileRe = regexp.MustCompile(`meminfo_([0-9]+)MB.txt$`) + var ignoreFile = regexp.MustCompile(`README\.md$`) + + testCases, err := crossagent.ReadDir("proc_meminfo") + if err != nil { + t.Fatal(err) + } + + for _, testFile := range testCases { + if ignoreFile.MatchString(testFile) { + continue + } + + matches := fileRe.FindStringSubmatch(testFile) + + if matches == nil || len(matches) < 2 { + t.Error(testFile, matches) + continue + } + + expect, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + t.Error(err) + continue + } + + input, err := os.Open(testFile) + if err != nil { + t.Error(err) + continue + } + bts, err := parseProcMeminfo(input) + input.Close() + mib := BytesToMebibytes(bts) + if err != 
nil { + t.Error(err) + } else if mib != expect { + t.Error(bts, expect) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go new file mode 100644 index 00000000..b211317e --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go @@ -0,0 +1,23 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + // https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx + // http://stackoverflow.com/questions/30743070/query-total-physical-memory-in-windows-with-golang + mod := syscall.NewLazyDLL("kernel32.dll") + proc := mod.NewProc("GetPhysicallyInstalledSystemMemory") + var memkb uint64 + + ret, _, err := proc.Call(uintptr(unsafe.Pointer(&memkb))) + // return value TRUE(1) succeeds, FAILED(0) fails + if ret != 1 { + return 0, err + } + + return memkb * 1024, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go new file mode 100644 index 00000000..071049ed --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go @@ -0,0 +1,11 @@ +package sysinfo + +import ( + "time" +) + +// Usage contains process process times. 
+type Usage struct { + System time.Duration + User time.Duration +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go new file mode 100644 index 00000000..3f7ab31f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go @@ -0,0 +1,26 @@ +// +build !windows + +package sysinfo + +import ( + "syscall" + "time" +) + +func timevalToDuration(tv syscall.Timeval) time.Duration { + return time.Duration(tv.Nano()) * time.Nanosecond +} + +// GetUsage gathers process times. +func GetUsage() (Usage, error) { + ru := syscall.Rusage{} + err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: timevalToDuration(ru.Stime), + User: timevalToDuration(ru.Utime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go new file mode 100644 index 00000000..8a8677a3 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go @@ -0,0 +1,34 @@ +package sysinfo + +import ( + "syscall" + "time" +) + +func filetimeToDuration(ft *syscall.Filetime) time.Duration { + ns := ft.Nanoseconds() + return time.Duration(ns) +} + +// GetUsage gathers process times. 
+func GetUsage() (Usage, error) { + var creationTime syscall.Filetime + var exitTime syscall.Filetime + var kernelTime syscall.Filetime + var userTime syscall.Filetime + + handle, err := syscall.GetCurrentProcess() + if err != nil { + return Usage{}, err + } + + err = syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: filetimeToDuration(&kernelTime), + User: filetimeToDuration(&userTime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tools/rules/main.go b/vendor/github.com/newrelic/go-agent/internal/tools/rules/main.go new file mode 100644 index 00000000..80f42b84 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tools/rules/main.go @@ -0,0 +1,47 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/newrelic/go-agent/internal" +) + +func fail(reason string) { + fmt.Println(reason) + os.Exit(1) +} + +func main() { + if len(os.Args) < 3 { + fail("improper usage: ./rules path/to/reply_file input") + } + + connectReplyFile := os.Args[1] + name := os.Args[2] + + data, err := ioutil.ReadFile(connectReplyFile) + if nil != err { + fail(fmt.Sprintf("unable to open '%s': %s", connectReplyFile, err)) + } + + var reply internal.ConnectReply + err = json.Unmarshal(data, &reply) + if nil != err { + fail(fmt.Sprintf("unable unmarshal reply: %s", err)) + } + + // Metric Rules + out := reply.MetricRules.Apply(name) + fmt.Println("metric rules applied:", out) + + // Url Rules + Txn Name Rules + Segment Term Rules + + out = internal.CreateFullTxnName(name, &reply, true) + fmt.Println("treated as web txn name:", out) + + out = internal.CreateFullTxnName(name, &reply, false) + fmt.Println("treated as backround txn name:", out) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tools/utilization/main.go b/vendor/github.com/newrelic/go-agent/internal/tools/utilization/main.go new file mode 100644 
index 00000000..f129717b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tools/utilization/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/newrelic/go-agent" + "github.com/newrelic/go-agent/internal/utilization" +) + +func main() { + util := utilization.Gather(utilization.Config{ + DetectAWS: true, + DetectDocker: true, + }, newrelic.NewDebugLogger(os.Stdout)) + + js, err := json.MarshalIndent(util, "", "\t") + if err != nil { + fmt.Printf("%s\n", err) + } else { + fmt.Printf("%s\n", js) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tracing.go b/vendor/github.com/newrelic/go-agent/internal/tracing.go new file mode 100644 index 00000000..37ce0d84 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tracing.go @@ -0,0 +1,434 @@ +package internal + +import ( + "errors" + "fmt" + "net/url" + "time" + + "github.com/newrelic/go-agent/internal/sysinfo" +) + +// TxnEvent represents a transaction. +// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md +// https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events +type TxnEvent struct { + FinalName string + Start time.Time + Duration time.Duration + Queuing time.Duration + Zone ApdexZone + Attrs *Attributes + DatastoreExternalTotals + // CleanURL is not used in txn events, but is used in traced errors which embed TxnEvent. + CleanURL string +} + +// TxnData contains the recorded data of a transaction. +type TxnData struct { + TxnEvent + IsWeb bool + Errors TxnErrors // Lazily initialized. 
+ Stop time.Time + ApdexThreshold time.Duration + Exclusive time.Duration + + finishedChildren time.Duration + stamp segmentStamp + stack []segmentFrame + + customSegments map[string]*metricData + datastoreSegments map[DatastoreMetricKey]*metricData + externalSegments map[externalMetricKey]*metricData + + TxnTrace + + SlowQueriesEnabled bool + SlowQueryThreshold time.Duration + SlowQueries *slowQueries +} + +type segmentStamp uint64 + +type segmentTime struct { + Stamp segmentStamp + Time time.Time +} + +// SegmentStartTime is embedded into the top level segments (rather than +// segmentTime) to minimize the structure sizes to minimize allocations. +type SegmentStartTime struct { + Stamp segmentStamp + Depth int +} + +type segmentFrame struct { + segmentTime + children time.Duration +} + +type segmentEnd struct { + start segmentTime + stop segmentTime + duration time.Duration + exclusive time.Duration +} + +const ( + datastoreProductUnknown = "Unknown" + datastoreOperationUnknown = "other" +) + +// HasErrors indicates whether the transaction had errors. +func (t *TxnData) HasErrors() bool { + return len(t.Errors) > 0 +} + +func (t *TxnData) time(now time.Time) segmentTime { + // Update the stamp before using it so that a 0 stamp can be special. + t.stamp++ + return segmentTime{ + Time: now, + Stamp: t.stamp, + } +} + +// TracerRootChildren is used to calculate a transaction's exclusive duration. +func TracerRootChildren(t *TxnData) time.Duration { + var lostChildren time.Duration + for i := 0; i < len(t.stack); i++ { + lostChildren += t.stack[i].children + } + return t.finishedChildren + lostChildren +} + +// StartSegment begins a segment. 
+func StartSegment(t *TxnData, now time.Time) SegmentStartTime { + tm := t.time(now) + t.stack = append(t.stack, segmentFrame{ + segmentTime: tm, + children: 0, + }) + + return SegmentStartTime{ + Stamp: tm.Stamp, + Depth: len(t.stack) - 1, + } +} + +var ( + errMalformedSegment = errors.New("segment identifier malformed: perhaps unsafe code has modified it?") + errSegmentOrder = errors.New(`improper segment use: the Transaction must be used ` + + `in a single goroutine and segments must be ended in "last started first ended" order: ` + + `see https://github.com/newrelic/go-agent/blob/master/GUIDE.md#segments`) +) + +func endSegment(t *TxnData, start SegmentStartTime, now time.Time) (segmentEnd, error) { + if 0 == start.Stamp { + return segmentEnd{}, errMalformedSegment + } + if start.Depth >= len(t.stack) { + return segmentEnd{}, errSegmentOrder + } + if start.Depth < 0 { + return segmentEnd{}, errMalformedSegment + } + if start.Stamp != t.stack[start.Depth].Stamp { + return segmentEnd{}, errSegmentOrder + } + + var children time.Duration + for i := start.Depth; i < len(t.stack); i++ { + children += t.stack[i].children + } + s := segmentEnd{ + stop: t.time(now), + start: t.stack[start.Depth].segmentTime, + } + if s.stop.Time.After(s.start.Time) { + s.duration = s.stop.Time.Sub(s.start.Time) + } + if s.duration > children { + s.exclusive = s.duration - children + } + + // Note that we expect (depth == (len(t.stack) - 1)). However, if + // (depth < (len(t.stack) - 1)), that's ok: could be a panic popped + // some stack frames (and the consumer was not using defer). + + if 0 == start.Depth { + t.finishedChildren += s.duration + } else { + t.stack[start.Depth-1].children += s.duration + } + + t.stack = t.stack[0:start.Depth] + + return s, nil +} + +// EndBasicSegment ends a basic segment. 
+func EndBasicSegment(t *TxnData, start SegmentStartTime, now time.Time, name string) error { + end, err := endSegment(t, start, now) + if nil != err { + return err + } + if nil == t.customSegments { + t.customSegments = make(map[string]*metricData) + } + m := metricDataFromDuration(end.duration, end.exclusive) + if data, ok := t.customSegments[name]; ok { + data.aggregate(m) + } else { + // Use `new` in place of &m so that m is not + // automatically moved to the heap. + cpy := new(metricData) + *cpy = m + t.customSegments[name] = cpy + } + + if t.TxnTrace.considerNode(end) { + t.TxnTrace.witnessNode(end, customSegmentMetric(name), nil) + } + + return nil +} + +// EndExternalSegment ends an external segment. +func EndExternalSegment(t *TxnData, start SegmentStartTime, now time.Time, u *url.URL) error { + end, err := endSegment(t, start, now) + if nil != err { + return err + } + host := HostFromURL(u) + if "" == host { + host = "unknown" + } + key := externalMetricKey{ + Host: host, + ExternalCrossProcessID: "", + ExternalTransactionName: "", + } + if nil == t.externalSegments { + t.externalSegments = make(map[externalMetricKey]*metricData) + } + t.externalCallCount++ + t.externalDuration += end.duration + m := metricDataFromDuration(end.duration, end.exclusive) + if data, ok := t.externalSegments[key]; ok { + data.aggregate(m) + } else { + // Use `new` in place of &m so that m is not + // automatically moved to the heap. + cpy := new(metricData) + *cpy = m + t.externalSegments[key] = cpy + } + + if t.TxnTrace.considerNode(end) { + t.TxnTrace.witnessNode(end, externalHostMetric(key), &traceNodeParams{ + CleanURL: SafeURL(u), + }) + } + + return nil +} + +// EndDatastoreParams contains the parameters for EndDatastoreSegment. 
+type EndDatastoreParams struct { + Tracer *TxnData + Start SegmentStartTime + Now time.Time + Product string + Collection string + Operation string + ParameterizedQuery string + QueryParameters map[string]interface{} + Host string + PortPathOrID string + Database string +} + +const ( + unknownDatastoreHost = "unknown" + unknownDatastorePortPathOrID = "unknown" +) + +var ( + // ThisHost is the system hostname. + ThisHost = func() string { + if h, err := sysinfo.Hostname(); nil == err { + return h + } + return unknownDatastoreHost + }() + hostsToReplace = map[string]struct{}{ + "localhost": struct{}{}, + "127.0.0.1": struct{}{}, + "0.0.0.0": struct{}{}, + "0:0:0:0:0:0:0:1": struct{}{}, + "::1": struct{}{}, + "0:0:0:0:0:0:0:0": struct{}{}, + "::": struct{}{}, + } +) + +func (t TxnData) slowQueryWorthy(d time.Duration) bool { + return t.SlowQueriesEnabled && (d >= t.SlowQueryThreshold) +} + +// EndDatastoreSegment ends a datastore segment. +func EndDatastoreSegment(p EndDatastoreParams) error { + end, err := endSegment(p.Tracer, p.Start, p.Now) + if nil != err { + return err + } + if p.Operation == "" { + p.Operation = datastoreOperationUnknown + } + if p.Product == "" { + p.Product = datastoreProductUnknown + } + if p.Host == "" && p.PortPathOrID != "" { + p.Host = unknownDatastoreHost + } + if p.PortPathOrID == "" && p.Host != "" { + p.PortPathOrID = unknownDatastorePortPathOrID + } + if _, ok := hostsToReplace[p.Host]; ok { + p.Host = ThisHost + } + + // We still want to create a slowQuery if the consumer has not provided + // a Query string since the stack trace has value. 
+ if p.ParameterizedQuery == "" { + collection := p.Collection + if "" == collection { + collection = "unknown" + } + p.ParameterizedQuery = fmt.Sprintf(`'%s' on '%s' using '%s'`, + p.Operation, collection, p.Product) + } + + key := DatastoreMetricKey{ + Product: p.Product, + Collection: p.Collection, + Operation: p.Operation, + Host: p.Host, + PortPathOrID: p.PortPathOrID, + } + if nil == p.Tracer.datastoreSegments { + p.Tracer.datastoreSegments = make(map[DatastoreMetricKey]*metricData) + } + p.Tracer.datastoreCallCount++ + p.Tracer.datastoreDuration += end.duration + m := metricDataFromDuration(end.duration, end.exclusive) + if data, ok := p.Tracer.datastoreSegments[key]; ok { + data.aggregate(m) + } else { + // Use `new` in place of &m so that m is not + // automatically moved to the heap. + cpy := new(metricData) + *cpy = m + p.Tracer.datastoreSegments[key] = cpy + } + + scopedMetric := datastoreScopedMetric(key) + queryParams := vetQueryParameters(p.QueryParameters) + + if p.Tracer.TxnTrace.considerNode(end) { + p.Tracer.TxnTrace.witnessNode(end, scopedMetric, &traceNodeParams{ + Host: p.Host, + PortPathOrID: p.PortPathOrID, + Database: p.Database, + Query: p.ParameterizedQuery, + queryParameters: queryParams, + }) + } + + if p.Tracer.slowQueryWorthy(end.duration) { + if nil == p.Tracer.SlowQueries { + p.Tracer.SlowQueries = newSlowQueries(maxTxnSlowQueries) + } + // Frames to skip: + // this function + // endDatastore + // DatastoreSegment.End + skipFrames := 3 + p.Tracer.SlowQueries.observeInstance(slowQueryInstance{ + Duration: end.duration, + DatastoreMetric: scopedMetric, + ParameterizedQuery: p.ParameterizedQuery, + QueryParameters: queryParams, + Host: p.Host, + PortPathOrID: p.PortPathOrID, + DatabaseName: p.Database, + StackTrace: GetStackTrace(skipFrames), + }) + } + + return nil +} + +// MergeBreakdownMetrics creates segment metrics. 
+func MergeBreakdownMetrics(t *TxnData, metrics *metricTable) { + scope := t.FinalName + isWeb := t.IsWeb + // Custom Segment Metrics + for key, data := range t.customSegments { + name := customSegmentMetric(key) + // Unscoped + metrics.add(name, "", *data, unforced) + // Scoped + metrics.add(name, scope, *data, unforced) + } + + // External Segment Metrics + for key, data := range t.externalSegments { + metrics.add(externalRollupMetric.all, "", *data, forced) + metrics.add(externalRollupMetric.webOrOther(isWeb), "", *data, forced) + + hostMetric := externalHostMetric(key) + metrics.add(hostMetric, "", *data, unforced) + if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName { + txnMetric := externalTransactionMetric(key) + + // Unscoped CAT metrics + metrics.add(externalAppMetric(key), "", *data, unforced) + metrics.add(txnMetric, "", *data, unforced) + + // Scoped External Metric + metrics.add(txnMetric, scope, *data, unforced) + } else { + // Scoped External Metric + metrics.add(hostMetric, scope, *data, unforced) + } + } + + // Datastore Segment Metrics + for key, data := range t.datastoreSegments { + metrics.add(datastoreRollupMetric.all, "", *data, forced) + metrics.add(datastoreRollupMetric.webOrOther(isWeb), "", *data, forced) + + product := datastoreProductMetric(key) + metrics.add(product.all, "", *data, forced) + metrics.add(product.webOrOther(isWeb), "", *data, forced) + + if key.Host != "" && key.PortPathOrID != "" { + instance := datastoreInstanceMetric(key) + metrics.add(instance, "", *data, unforced) + } + + operation := datastoreOperationMetric(key) + metrics.add(operation, "", *data, unforced) + + if "" != key.Collection { + statement := datastoreStatementMetric(key) + + metrics.add(statement, "", *data, unforced) + metrics.add(statement, scope, *data, unforced) + } else { + metrics.add(operation, scope, *data, unforced) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tracing_test.go 
b/vendor/github.com/newrelic/go-agent/internal/tracing_test.go new file mode 100644 index 00000000..727cdcc9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tracing_test.go @@ -0,0 +1,481 @@ +package internal + +import ( + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestStartEndSegment(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + + tr := &TxnData{} + token := StartSegment(tr, start) + stop := start.Add(1 * time.Second) + end, err := endSegment(tr, token, stop) + if nil != err { + t.Error(err) + } + if end.exclusive != end.duration { + t.Error(end.exclusive, end.duration) + } + if end.duration != 1*time.Second { + t.Error(end.duration) + } + if end.start.Time != start { + t.Error(end.start, start) + } + if end.stop.Time != stop { + t.Error(end.stop, stop) + } +} + +func TestMultipleChildren(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + end2, err2 := endSegment(tr, t2, start.Add(3*time.Second)) + t3 := StartSegment(tr, start.Add(4*time.Second)) + end3, err3 := endSegment(tr, t3, start.Add(5*time.Second)) + end1, err1 := endSegment(tr, t1, start.Add(6*time.Second)) + t4 := StartSegment(tr, start.Add(7*time.Second)) + end4, err4 := endSegment(tr, t4, start.Add(8*time.Second)) + + if nil != err1 || end1.duration != 5*time.Second || end1.exclusive != 3*time.Second { + t.Error(end1, err1) + } + if nil != err2 || end2.duration != end2.exclusive || end2.duration != time.Second { + t.Error(end2, err2) + } + if nil != err3 || end3.duration != end3.exclusive || end3.duration != time.Second { + t.Error(end3, err3) + } + if nil != err4 || end4.duration != end4.exclusive || end4.duration != time.Second { + t.Error(end4, err4) + } + children := TracerRootChildren(tr) + if children != 
6*time.Second { + t.Error(children) + } +} + +func TestInvalidStart(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + end, err := endSegment(tr, SegmentStartTime{}, start.Add(1*time.Second)) + if err != errMalformedSegment { + t.Error(end, err) + } + StartSegment(tr, start.Add(2*time.Second)) + end, err = endSegment(tr, SegmentStartTime{}, start.Add(3*time.Second)) + if err != errMalformedSegment { + t.Error(end, err) + } +} + +func TestSegmentAlreadyEnded(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + end, err := endSegment(tr, t1, start.Add(2*time.Second)) + if err != nil { + t.Error(end, err) + } + end, err = endSegment(tr, t1, start.Add(3*time.Second)) + if err != errSegmentOrder { + t.Error(end, err) + } +} + +func TestSegmentBadStamp(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t1.Stamp++ + end, err := endSegment(tr, t1, start.Add(2*time.Second)) + if err != errSegmentOrder { + t.Error(end, err) + } +} + +func TestSegmentBadDepth(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t1.Depth++ + end, err := endSegment(tr, t1, start.Add(2*time.Second)) + if err != errSegmentOrder { + t.Error(end, err) + } +} + +func TestSegmentNegativeDepth(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t1.Depth = -1 + end, err := endSegment(tr, t1, start.Add(2*time.Second)) + if err != errMalformedSegment { + t.Error(end, err) + } +} + +func TestSegmentOutOfOrder(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := 
StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + t3 := StartSegment(tr, start.Add(3*time.Second)) + end2, err2 := endSegment(tr, t2, start.Add(4*time.Second)) + end3, err3 := endSegment(tr, t3, start.Add(5*time.Second)) + t4 := StartSegment(tr, start.Add(6*time.Second)) + end4, err4 := endSegment(tr, t4, start.Add(7*time.Second)) + end1, err1 := endSegment(tr, t1, start.Add(8*time.Second)) + + if nil != err1 || + end1.duration != 7*time.Second || + end1.exclusive != 4*time.Second { + t.Error(end1, err1) + } + if nil != err2 || end2.duration != end2.exclusive || end2.duration != 2*time.Second { + t.Error(end2, err2) + } + if err3 != errSegmentOrder { + t.Error(end3, err3) + } + if nil != err4 || end4.duration != end4.exclusive || end4.duration != 1*time.Second { + t.Error(end4, err4) + } +} + +// |-t3-| |-t4-| +// |-t2-| |-never-finished---------- +// |-t1-| |--never-finished------------------------ +// |-------alpha------------------------------------------| +// 0 1 2 3 4 5 6 7 8 9 10 11 12 +func TestLostChildren(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + alpha := StartSegment(tr, start.Add(1*time.Second)) + t1 := StartSegment(tr, start.Add(2*time.Second)) + EndBasicSegment(tr, t1, start.Add(3*time.Second), "t1") + StartSegment(tr, start.Add(4*time.Second)) + t2 := StartSegment(tr, start.Add(5*time.Second)) + EndBasicSegment(tr, t2, start.Add(6*time.Second), "t2") + StartSegment(tr, start.Add(7*time.Second)) + t3 := StartSegment(tr, start.Add(8*time.Second)) + EndBasicSegment(tr, t3, start.Add(9*time.Second), "t3") + t4 := StartSegment(tr, start.Add(10*time.Second)) + EndBasicSegment(tr, t4, start.Add(11*time.Second), "t4") + EndBasicSegment(tr, alpha, start.Add(12*time.Second), "alpha") + + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "WebTransaction/Go/zip" + tr.IsWeb = true + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, 
[]WantMetric{ + {"Custom/alpha", "", false, []float64{1, 11, 7, 11, 11, 121}}, + {"Custom/t1", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t2", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t4", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/alpha", tr.FinalName, false, []float64{1, 11, 7, 11, 11, 121}}, + {"Custom/t1", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t2", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t4", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) +} + +// |-t3-| |-t4-| +// |-t2-| |-never-finished---------- +// |-t1-| |--never-finished------------------------ +// |-------root------------------------------------------------- +// 0 1 2 3 4 5 6 7 8 9 10 11 12 +func TestLostChildrenRoot(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(2*time.Second)) + EndBasicSegment(tr, t1, start.Add(3*time.Second), "t1") + StartSegment(tr, start.Add(4*time.Second)) + t2 := StartSegment(tr, start.Add(5*time.Second)) + EndBasicSegment(tr, t2, start.Add(6*time.Second), "t2") + StartSegment(tr, start.Add(7*time.Second)) + t3 := StartSegment(tr, start.Add(8*time.Second)) + EndBasicSegment(tr, t3, start.Add(9*time.Second), "t3") + t4 := StartSegment(tr, start.Add(10*time.Second)) + EndBasicSegment(tr, t4, start.Add(11*time.Second), "t4") + + children := TracerRootChildren(tr) + if children != 4*time.Second { + t.Error(children) + } + + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "WebTransaction/Go/zip" + tr.IsWeb = true + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"Custom/t1", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t2", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t4", "", 
false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t1", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t2", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t4", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) +} + +func TestSegmentBasic(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + EndBasicSegment(tr, t2, start.Add(3*time.Second), "t2") + EndBasicSegment(tr, t1, start.Add(4*time.Second), "t1") + t3 := StartSegment(tr, start.Add(5*time.Second)) + t4 := StartSegment(tr, start.Add(6*time.Second)) + EndBasicSegment(tr, t3, start.Add(7*time.Second), "t3") + EndBasicSegment(tr, t4, start.Add(8*time.Second), "out-of-order") + t5 := StartSegment(tr, start.Add(9*time.Second)) + EndBasicSegment(tr, t5, start.Add(10*time.Second), "t1") + + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "WebTransaction/Go/zip" + tr.IsWeb = true + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"Custom/t1", "", false, []float64{2, 4, 3, 1, 3, 10}}, + {"Custom/t2", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", "", false, []float64{1, 2, 2, 2, 2, 4}}, + {"Custom/t1", tr.FinalName, false, []float64{2, 4, 3, 1, 3, 10}}, + {"Custom/t2", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Custom/t3", tr.FinalName, false, []float64{1, 2, 2, 2, 2, 4}}, + }) +} + +func parseURL(raw string) *url.URL { + u, _ := url.Parse(raw) + return u +} + +func TestSegmentExternal(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + EndExternalSegment(tr, t2, start.Add(3*time.Second), nil) + EndExternalSegment(tr, t1, start.Add(4*time.Second), 
parseURL("http://f1.com")) + t3 := StartSegment(tr, start.Add(5*time.Second)) + EndExternalSegment(tr, t3, start.Add(6*time.Second), parseURL("http://f1.com")) + t4 := StartSegment(tr, start.Add(7*time.Second)) + t4.Stamp++ + EndExternalSegment(tr, t4, start.Add(8*time.Second), parseURL("http://invalid-token.com")) + + if tr.externalCallCount != 3 { + t.Error(tr.externalCallCount) + } + if tr.externalDuration != 5*time.Second { + t.Error(tr.externalDuration) + } + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "WebTransaction/Go/zip" + tr.IsWeb = true + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"External/all", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"External/allWeb", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"External/f1.com/all", "", false, []float64{2, 4, 3, 1, 3, 10}}, + {"External/unknown/all", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"External/f1.com/all", tr.FinalName, false, []float64{2, 4, 3, 1, 3, 10}}, + {"External/unknown/all", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) + + metrics = newMetricTable(100, time.Now()) + tr.FinalName = "OtherTransaction/Go/zip" + tr.IsWeb = false + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"External/all", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"External/allOther", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"External/f1.com/all", "", false, []float64{2, 4, 3, 1, 3, 10}}, + {"External/unknown/all", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"External/f1.com/all", tr.FinalName, false, []float64{2, 4, 3, 1, 3, 10}}, + {"External/unknown/all", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) +} + +func TestSegmentDatastore(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t2, + Now: 
start.Add(3 * time.Second), + Product: "MySQL", + Operation: "SELECT", + Collection: "my_table", + }) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t1, + Now: start.Add(4 * time.Second), + Product: "MySQL", + Operation: "SELECT", + // missing collection + }) + t3 := StartSegment(tr, start.Add(5*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t3, + Now: start.Add(6 * time.Second), + Product: "MySQL", + Operation: "SELECT", + // missing collection + }) + t4 := StartSegment(tr, start.Add(7*time.Second)) + t4.Stamp++ + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t4, + Now: start.Add(8 * time.Second), + Product: "MySQL", + Operation: "invalid-token", + }) + t5 := StartSegment(tr, start.Add(9*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t5, + Now: start.Add(10 * time.Second), + // missing datastore, collection, and operation + }) + + if tr.datastoreCallCount != 4 { + t.Error(tr.datastoreCallCount) + } + if tr.datastoreDuration != 6*time.Second { + t.Error(tr.datastoreDuration) + } + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "WebTransaction/Go/zip" + tr.IsWeb = true + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"Datastore/all", "", true, []float64{4, 6, 5, 1, 3, 12}}, + {"Datastore/allWeb", "", true, []float64{4, 6, 5, 1, 3, 12}}, + {"Datastore/MySQL/all", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/MySQL/allWeb", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/Unknown/all", "", true, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/Unknown/allWeb", "", true, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/operation/MySQL/SELECT", "", false, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/operation/MySQL/SELECT", tr.FinalName, false, []float64{2, 4, 3, 1, 3, 10}}, + {"Datastore/operation/Unknown/other", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/operation/Unknown/other", tr.FinalName, false, 
[]float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/statement/MySQL/my_table/SELECT", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/statement/MySQL/my_table/SELECT", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) + + metrics = newMetricTable(100, time.Now()) + tr.FinalName = "OtherTransaction/Go/zip" + tr.IsWeb = false + MergeBreakdownMetrics(tr, metrics) + ExpectMetrics(t, metrics, []WantMetric{ + {"Datastore/all", "", true, []float64{4, 6, 5, 1, 3, 12}}, + {"Datastore/allOther", "", true, []float64{4, 6, 5, 1, 3, 12}}, + {"Datastore/MySQL/all", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/MySQL/allOther", "", true, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/Unknown/all", "", true, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/Unknown/allOther", "", true, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/operation/MySQL/SELECT", "", false, []float64{3, 5, 4, 1, 3, 11}}, + {"Datastore/operation/MySQL/SELECT", tr.FinalName, false, []float64{2, 4, 3, 1, 3, 10}}, + {"Datastore/operation/Unknown/other", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/operation/Unknown/other", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/statement/MySQL/my_table/SELECT", "", false, []float64{1, 1, 1, 1, 1, 1}}, + {"Datastore/statement/MySQL/my_table/SELECT", tr.FinalName, false, []float64{1, 1, 1, 1, 1, 1}}, + }) +} + +func TestDatastoreInstancesCrossAgent(t *testing.T) { + var testcases []struct { + Name string `json:"name"` + SystemHostname string `json:"system_hostname"` + DBHostname string `json:"db_hostname"` + Product string `json:"product"` + Port int `json:"port"` + Socket string `json:"unix_socket"` + DatabasePath string `json:"database_path"` + ExpectedMetric string `json:"expected_instance_metric"` + } + + err := crossagent.ReadJSON("datastores/datastore_instances.json", &testcases) + if err != nil { + t.Fatal(err) + } + + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + + for _, tc := range testcases { + 
portPathOrID := "" + if 0 != tc.Port { + portPathOrID = strconv.Itoa(tc.Port) + } else if "" != tc.Socket { + portPathOrID = tc.Socket + } else if "" != tc.DatabasePath { + portPathOrID = tc.DatabasePath + // These tests makes weird assumptions. + tc.DBHostname = "localhost" + } + + tr := &TxnData{} + s := StartSegment(tr, start) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: s, + Now: start.Add(1 * time.Second), + Product: tc.Product, + Operation: "SELECT", + Collection: "my_table", + PortPathOrID: portPathOrID, + Host: tc.DBHostname, + }) + + expect := strings.Replace(tc.ExpectedMetric, + tc.SystemHostname, ThisHost, -1) + + metrics := newMetricTable(100, time.Now()) + tr.FinalName = "OtherTransaction/Go/zip" + tr.IsWeb = false + MergeBreakdownMetrics(tr, metrics) + data := []float64{1, 1, 1, 1, 1, 1} + ExpectMetrics(ExtendValidator(t, tc.Name), metrics, []WantMetric{ + {"Datastore/all", "", true, data}, + {"Datastore/allOther", "", true, data}, + {"Datastore/" + tc.Product + "/all", "", true, data}, + {"Datastore/" + tc.Product + "/allOther", "", true, data}, + {"Datastore/operation/" + tc.Product + "/SELECT", "", false, data}, + {"Datastore/statement/" + tc.Product + "/my_table/SELECT", "", false, data}, + {"Datastore/statement/" + tc.Product + "/my_table/SELECT", tr.FinalName, false, data}, + {expect, "", false, data}, + }) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_events.go b/vendor/github.com/newrelic/go-agent/internal/txn_events.go new file mode 100644 index 00000000..c7d3deb5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_events.go @@ -0,0 +1,84 @@ +package internal + +import ( + "bytes" + "math/rand" + "time" +) + +// DatastoreExternalTotals contains overview of external and datastore calls +// made during a transaction. 
+type DatastoreExternalTotals struct { + externalCallCount uint64 + externalDuration time.Duration + datastoreCallCount uint64 + datastoreDuration time.Duration +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *TxnEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "Transaction") + w.stringField("name", e.FinalName) + w.floatField("timestamp", timeToFloatSeconds(e.Start)) + w.floatField("duration", e.Duration.Seconds()) + if ApdexNone != e.Zone { + w.stringField("nr.apdexPerfZone", e.Zone.label()) + } + if e.Queuing > 0 { + w.floatField("queueDuration", e.Queuing.Seconds()) + } + if e.externalCallCount > 0 { + w.intField("externalCallCount", int64(e.externalCallCount)) + w.floatField("externalDuration", e.externalDuration.Seconds()) + } + if e.datastoreCallCount > 0 { + // Note that "database" is used for the keys here instead of + // "datastore" for historical reasons. + w.intField("databaseCallCount", int64(e.datastoreCallCount)) + w.floatField("databaseDuration", e.datastoreDuration.Seconds()) + } + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destTxnEvent) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destTxnEvent) + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. 
+func (e *TxnEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +type txnEvents struct { + events *analyticsEvents +} + +func newTxnEvents(max int) *txnEvents { + return &txnEvents{ + events: newAnalyticsEvents(max), + } +} + +func (events *txnEvents) AddTxnEvent(e *TxnEvent) { + stamp := eventStamp(rand.Float32()) + events.events.addEvent(analyticsEvent{stamp, e}) +} + +func (events *txnEvents) MergeIntoHarvest(h *Harvest) { + h.TxnEvents.events.mergeFailed(events.events) +} + +func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.events.CollectorJSON(agentRunID) +} + +func (events *txnEvents) numSeen() float64 { return events.events.NumSeen() } +func (events *txnEvents) numSaved() float64 { return events.events.NumSaved() } diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_events_test.go b/vendor/github.com/newrelic/go-agent/internal/txn_events_test.go new file mode 100644 index 00000000..c1ad4b52 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_events_test.go @@ -0,0 +1,129 @@ +package internal + +import ( + "encoding/json" + "testing" + "time" +) + +func testTxnEventJSON(t *testing.T, e *TxnEvent, expect string) { + js, err := json.Marshal(e) + if nil != err { + t.Error(err) + return + } + expect = CompactJSONString(expect) + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestTxnEventMarshal(t *testing.T) { + testTxnEventJSON(t, &TxnEvent{ + FinalName: "myName", + Start: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Duration: 2 * time.Second, + Zone: ApdexNone, + Attrs: nil, + }, `[ + { + "type":"Transaction", + "name":"myName", + "timestamp":1.41713646e+09, + "duration":2 + }, + {}, + {}]`) + testTxnEventJSON(t, &TxnEvent{ + FinalName: "myName", + Start: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Duration: 2 * time.Second, + Zone: 
ApdexFailing, + Attrs: nil, + }, `[ + { + "type":"Transaction", + "name":"myName", + "timestamp":1.41713646e+09, + "duration":2, + "nr.apdexPerfZone":"F" + }, + {}, + {}]`) + testTxnEventJSON(t, &TxnEvent{ + FinalName: "myName", + Start: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Duration: 2 * time.Second, + Queuing: 5 * time.Second, + Zone: ApdexNone, + Attrs: nil, + }, `[ + { + "type":"Transaction", + "name":"myName", + "timestamp":1.41713646e+09, + "duration":2, + "queueDuration":5 + }, + {}, + {}]`) + testTxnEventJSON(t, &TxnEvent{ + FinalName: "myName", + Start: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Duration: 2 * time.Second, + Queuing: 5 * time.Second, + Zone: ApdexNone, + Attrs: nil, + DatastoreExternalTotals: DatastoreExternalTotals{ + externalCallCount: 22, + externalDuration: 1122334 * time.Millisecond, + datastoreCallCount: 33, + datastoreDuration: 5566778 * time.Millisecond, + }, + }, `[ + { + "type":"Transaction", + "name":"myName", + "timestamp":1.41713646e+09, + "duration":2, + "queueDuration":5, + "externalCallCount":22, + "externalDuration":1122.334, + "databaseCallCount":33, + "databaseDuration":5566.778 + }, + {}, + {}]`) +} + +func TestTxnEventAttributes(t *testing.T) { + aci := sampleAttributeConfigInput + aci.TransactionEvents.Exclude = append(aci.TransactionEvents.Exclude, "zap") + aci.TransactionEvents.Exclude = append(aci.TransactionEvents.Exclude, hostDisplayName) + cfg := CreateAttributeConfig(aci) + attr := NewAttributes(cfg) + attr.Agent.HostDisplayName = "exclude me" + attr.Agent.RequestMethod = "GET" + AddUserAttribute(attr, "zap", 123, DestAll) + AddUserAttribute(attr, "zip", 456, DestAll) + + testTxnEventJSON(t, &TxnEvent{ + FinalName: "myName", + Start: time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC), + Duration: 2 * time.Second, + Zone: ApdexNone, + Attrs: attr, + }, `[ + { + "type":"Transaction", + "name":"myName", + "timestamp":1.41713646e+09, + "duration":2 + }, + { + "zip":456 + 
}, + { + "request.method":"GET" + }]`) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_trace.go b/vendor/github.com/newrelic/go-agent/internal/txn_trace.go new file mode 100644 index 00000000..2a481441 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_trace.go @@ -0,0 +1,318 @@ +package internal + +import ( + "bytes" + "container/heap" + "encoding/json" + "sort" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// See https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Trace-LEGACY.md + +type traceNodeHeap []traceNode + +// traceNodeParams is used for trace node parameters. A struct is used in place +// of a map[string]interface{} to facilitate testing and reduce JSON Marshal +// overhead. If too many fields get added here, it probably makes sense to +// start using a map. This struct is not embedded into traceNode to minimize +// the size of traceNode: Not all nodes will have parameters. +type traceNodeParams struct { + StackTrace StackTrace + CleanURL string + Database string + Host string + PortPathOrID string + Query string + queryParameters queryParameters +} + +func (p *traceNodeParams) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + if nil != p.StackTrace { + w.writerField("backtrace", p.StackTrace) + } + if "" != p.CleanURL { + w.stringField("uri", p.CleanURL) + } + if "" != p.Database { + w.stringField("database_name", p.Database) + } + if "" != p.Host { + w.stringField("host", p.Host) + } + if "" != p.PortPathOrID { + w.stringField("port_path_or_id", p.PortPathOrID) + } + if "" != p.Query { + w.stringField("query", p.Query) + } + if nil != p.queryParameters { + w.writerField("query_parameters", p.queryParameters) + } + buf.WriteByte('}') +} + +// MarshalJSON is used for testing. 
+func (p *traceNodeParams) MarshalJSON() ([]byte, error) { + buf := &bytes.Buffer{} + p.WriteJSON(buf) + return buf.Bytes(), nil +} + +type traceNode struct { + start segmentTime + stop segmentTime + duration time.Duration + params *traceNodeParams + name string +} + +func (h traceNodeHeap) Len() int { return len(h) } +func (h traceNodeHeap) Less(i, j int) bool { return h[i].duration < h[j].duration } +func (h traceNodeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h traceNodeHeap) Push(x interface{}) {} +func (h traceNodeHeap) Pop() interface{} { return nil } + +// TxnTrace contains the work in progress transaction trace. +type TxnTrace struct { + Enabled bool + SegmentThreshold time.Duration + StackTraceThreshold time.Duration + nodes traceNodeHeap + maxNodes int +} + +// considerNode exists to prevent unnecessary calls to witnessNode: constructing +// the metric name and params map requires allocations. +func (trace *TxnTrace) considerNode(end segmentEnd) bool { + return trace.Enabled && (end.duration >= trace.SegmentThreshold) +} + +func (trace *TxnTrace) witnessNode(end segmentEnd, name string, params *traceNodeParams) { + node := traceNode{ + start: end.start, + stop: end.stop, + duration: end.duration, + name: name, + params: params, + } + if !trace.considerNode(end) { + return + } + if trace.nodes == nil { + max := trace.maxNodes + if 0 == max { + max = maxTxnTraceNodes + } + trace.nodes = make(traceNodeHeap, 0, max) + } + if end.exclusive >= trace.StackTraceThreshold { + if node.params == nil { + p := new(traceNodeParams) + node.params = p + } + // skip the following stack frames: + // this method + // function in tracing.go (EndBasicSegment, EndExternalSegment, EndDatastoreSegment) + // function in internal_txn.go (endSegment, endExternal, endDatastore) + // segment end method + skip := 4 + node.params.StackTrace = GetStackTrace(skip) + } + if len(trace.nodes) < cap(trace.nodes) 
{ + trace.nodes = append(trace.nodes, node) + if len(trace.nodes) == cap(trace.nodes) { + heap.Init(trace.nodes) + } + return + } + if node.duration <= trace.nodes[0].duration { + return + } + trace.nodes[0] = node + heap.Fix(trace.nodes, 0) +} + +// HarvestTrace contains a finished transaction trace ready for serialization to +// the collector. +type HarvestTrace struct { + TxnEvent + Trace TxnTrace +} + +type nodeDetails struct { + name string + relativeStart time.Duration + relativeStop time.Duration + params *traceNodeParams +} + +func printNodeStart(buf *bytes.Buffer, n nodeDetails) { + // time.Seconds() is intentionally not used here. Millisecond + // precision is enough. + relativeStartMillis := n.relativeStart.Nanoseconds() / (1000 * 1000) + relativeStopMillis := n.relativeStop.Nanoseconds() / (1000 * 1000) + + buf.WriteByte('[') + jsonx.AppendInt(buf, relativeStartMillis) + buf.WriteByte(',') + jsonx.AppendInt(buf, relativeStopMillis) + buf.WriteByte(',') + jsonx.AppendString(buf, n.name) + buf.WriteByte(',') + if nil == n.params { + buf.WriteString("{}") + } else { + n.params.WriteJSON(buf) + } + buf.WriteByte(',') + buf.WriteByte('[') +} + +func printChildren(buf *bytes.Buffer, traceStart time.Time, nodes sortedTraceNodes, next int, stop segmentStamp) int { + firstChild := true + for next < len(nodes) && nodes[next].start.Stamp < stop { + if firstChild { + firstChild = false + } else { + buf.WriteByte(',') + } + printNodeStart(buf, nodeDetails{ + name: nodes[next].name, + relativeStart: nodes[next].start.Time.Sub(traceStart), + relativeStop: nodes[next].stop.Time.Sub(traceStart), + params: nodes[next].params, + }) + next = printChildren(buf, traceStart, nodes, next+1, nodes[next].stop.Stamp) + buf.WriteString("]]") + + } + return next +} + +type sortedTraceNodes []*traceNode + +func (s sortedTraceNodes) Len() int { return len(s) } +func (s sortedTraceNodes) Less(i, j int) bool { return s[i].start.Stamp < s[j].start.Stamp } +func (s sortedTraceNodes) 
Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// MarshalJSON prepares the trace in the JSON expected by the collector. +func (trace *HarvestTrace) MarshalJSON() ([]byte, error) { + estimate := 100 * len(trace.Trace.nodes) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + nodes := make(sortedTraceNodes, len(trace.Trace.nodes)) + for i := 0; i < len(nodes); i++ { + nodes[i] = &trace.Trace.nodes[i] + } + sort.Sort(nodes) + + buf.WriteByte('[') // begin trace + + jsonx.AppendInt(buf, trace.Start.UnixNano()/1000) + buf.WriteByte(',') + jsonx.AppendFloat(buf, trace.Duration.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendString(buf, trace.FinalName) + buf.WriteByte(',') + jsonx.AppendString(buf, trace.CleanURL) + buf.WriteByte(',') + + buf.WriteByte('[') // begin trace data + + // If the trace string pool is used, insert another array here. + + jsonx.AppendFloat(buf, 0.0) // unused timestamp + buf.WriteByte(',') // + buf.WriteString("{}") // unused: formerly request parameters + buf.WriteByte(',') // + buf.WriteString("{}") // unused: formerly custom parameters + buf.WriteByte(',') // + + printNodeStart(buf, nodeDetails{ // begin outer root + name: "ROOT", + relativeStart: 0, + relativeStop: trace.Duration, + }) + + printNodeStart(buf, nodeDetails{ // begin inner root + name: trace.FinalName, + relativeStart: 0, + relativeStop: trace.Duration, + }) + + if len(nodes) > 0 { + lastStopStamp := nodes[len(nodes)-1].stop.Stamp + 1 + printChildren(buf, trace.Start, nodes, 0, lastStopStamp) + } + + buf.WriteString("]]") // end outer root + buf.WriteString("]]") // end inner root + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"agentAttributes":`) + agentAttributesJSON(trace.Attrs, buf, destTxnTrace) + buf.WriteByte(',') + buf.WriteString(`"userAttributes":`) + userAttributesJSON(trace.Attrs, buf, destTxnTrace) + buf.WriteByte(',') + buf.WriteString(`"intrinsics":{}`) // TODO intrinsics + buf.WriteByte('}') + + // If the trace string pool is used, end 
another array here. + + buf.WriteByte(']') // end trace data + + buf.WriteByte(',') + buf.WriteString(`""`) // GUID is not yet supported + buf.WriteByte(',') // + buf.WriteString(`null`) // reserved for future use + buf.WriteByte(',') // + buf.WriteString(`false`) // ForcePersist is not yet supported + buf.WriteByte(',') // + buf.WriteString(`null`) // X-Ray sessions not supported + buf.WriteByte(',') // + buf.WriteString(`""`) // SyntheticsResourceID is not yet supported + + buf.WriteByte(']') // end trace + + return buf.Bytes(), nil + +} + +type harvestTraces struct { + trace *HarvestTrace +} + +func newHarvestTraces() *harvestTraces { + return &harvestTraces{} +} + +func (traces *harvestTraces) Witness(trace HarvestTrace) { + if nil == traces.trace || traces.trace.Duration < trace.Duration { + cpy := new(HarvestTrace) + *cpy = trace + traces.trace = cpy + } +} + +func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if nil == traces.trace { + return nil, nil + } + return json.Marshal([]interface{}{ + agentRunID, + []interface{}{ + traces.trace, + }, + }) +} + +func (traces *harvestTraces) MergeIntoHarvest(h *Harvest) {} diff --git a/vendor/github.com/newrelic/go-agent/internal/txn_trace_test.go b/vendor/github.com/newrelic/go-agent/internal/txn_trace_test.go new file mode 100644 index 00000000..fc4d38f4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/txn_trace_test.go @@ -0,0 +1,583 @@ +package internal + +import ( + "strconv" + "testing" + "time" +) + +func TestTxnTrace(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + tr.TxnTrace.StackTraceThreshold = 1 * time.Hour + tr.TxnTrace.SegmentThreshold = 0 + + t1 := StartSegment(tr, start.Add(1*time.Second)) + t2 := StartSegment(tr, start.Add(2*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t2, + Now: start.Add(3 * time.Second), + Product: 
"MySQL", + Operation: "SELECT", + Collection: "my_table", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: vetQueryParameters(map[string]interface{}{"zip": 1}), + Database: "my_db", + Host: "db-server-1", + PortPathOrID: "3306", + }) + t3 := StartSegment(tr, start.Add(4*time.Second)) + EndExternalSegment(tr, t3, start.Add(5*time.Second), parseURL("http://example.com/zip/zap?secret=shhh")) + EndBasicSegment(tr, t1, start.Add(6*time.Second), "t1") + t4 := StartSegment(tr, start.Add(7*time.Second)) + t5 := StartSegment(tr, start.Add(8*time.Second)) + t6 := StartSegment(tr, start.Add(9*time.Second)) + EndBasicSegment(tr, t6, start.Add(10*time.Second), "t6") + EndBasicSegment(tr, t5, start.Add(11*time.Second), "t5") + t7 := StartSegment(tr, start.Add(12*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t7, + Now: start.Add(13 * time.Second), + Product: "MySQL", + Operation: "SELECT", + // no collection + }) + t8 := StartSegment(tr, start.Add(14*time.Second)) + EndExternalSegment(tr, t8, start.Add(15*time.Second), nil) + EndBasicSegment(tr, t4, start.Add(16*time.Second), "t4") + + acfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(acfg) + attr.Agent.RequestMethod = "GET" + AddUserAttribute(attr, "zap", 123, DestAll) + + ht := HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 20 * time.Second, + FinalName: "WebTransaction/Go/hello", + CleanURL: "/url", + Attrs: attr, + }, + Trace: tr.TxnTrace, + } + + expect := `[ + 1417136460000000, + 20000, + "WebTransaction/Go/hello", + "/url", + [ + 0, + {}, + {}, + [ + 0, + 20000, + "ROOT", + {}, + [ + [ + 0, + 20000, + "WebTransaction/Go/hello", + {}, + [ + [ + 1000, + 6000, + "Custom/t1", + {}, + [ + [ + 2000, + 3000, + "Datastore/statement/MySQL/my_table/SELECT", + { + "database_name":"my_db", + "host":"db-server-1", + "port_path_or_id":"3306", + "query":"INSERT INTO users (name, age) VALUES ($1, $2)", + 
"query_parameters":{ + "zip":1 + } + }, + [] + ], + [ + 4000, + 5000, + "External/example.com/all", + { + "uri":"http://example.com/zip/zap" + }, + [] + ] + ] + ], + [ + 7000, + 16000, + "Custom/t4", + {}, + [ + [ + 8000, + 11000, + "Custom/t5", + {}, + [ + [ + 9000, + 10000, + "Custom/t6", + {}, + [] + ] + ] + ], + [ + 12000, + 13000, + "Datastore/operation/MySQL/SELECT", + { + "query":"'SELECT' on 'unknown' using 'MySQL'" + }, + [] + ], + [ + 14000, + 15000, + "External/unknown/all", + {}, + [] + ] + ] + ] + ] + ] + ] + ], + { + "agentAttributes":{ + "request.method":"GET" + }, + "userAttributes":{ + "zap":123 + }, + "intrinsics":{} + } + ], + "", + null, + false, + null, + "" + ]` + + expect = CompactJSONString(expect) + js, err := ht.MarshalJSON() + if nil != err { + t.Fatal(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestTxnTraceNoSegmentsNoAttributes(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + tr.TxnTrace.StackTraceThreshold = 1 * time.Hour + tr.TxnTrace.SegmentThreshold = 0 + + acfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(acfg) + + ht := HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 20 * time.Second, + FinalName: "WebTransaction/Go/hello", + CleanURL: "/url", + Attrs: attr, + }, + Trace: tr.TxnTrace, + } + + expect := `[ + 1417136460000000, + 20000, + "WebTransaction/Go/hello", + "/url", + [ + 0, + {}, + {}, + [ + 0, + 20000, + "ROOT", + {}, + [ + [ + 0, + 20000, + "WebTransaction/Go/hello", + {}, + [] + ] + ] + ], + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{} + } + ], + "", + null, + false, + null, + "" + ]` + expect = CompactJSONString(expect) + js, err := ht.MarshalJSON() + if nil != err { + t.Fatal(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestTxnTraceSlowestNodesSaved(t *testing.T) { + start := time.Date(2014, 
time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + tr.TxnTrace.StackTraceThreshold = 1 * time.Hour + tr.TxnTrace.SegmentThreshold = 0 + tr.TxnTrace.maxNodes = 5 + + durations := []int{5, 4, 6, 3, 7, 2, 8, 1, 9} + now := start + for _, d := range durations { + s := StartSegment(tr, now) + now = now.Add(time.Duration(d) * time.Second) + EndBasicSegment(tr, s, now, strconv.Itoa(d)) + } + + acfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(acfg) + + ht := HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 123 * time.Second, + FinalName: "WebTransaction/Go/hello", + CleanURL: "/url", + Attrs: attr, + }, + Trace: tr.TxnTrace, + } + + expect := `[ + 1417136460000000, + 123000, + "WebTransaction/Go/hello", + "/url", + [ + 0, + {}, + {}, + [ + 0, + 123000, + "ROOT", + {}, + [ + [ + 0, + 123000, + "WebTransaction/Go/hello", + {}, + [ + [ + 0, + 5000, + "Custom/5", + {}, + [] + ], + [ + 9000, + 15000, + "Custom/6", + {}, + [] + ], + [ + 18000, + 25000, + "Custom/7", + {}, + [] + ], + [ + 27000, + 35000, + "Custom/8", + {}, + [] + ], + [ + 36000, + 45000, + "Custom/9", + {}, + [] + ] + ] + ] + ] + ], + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{} + } + ], + "", + null, + false, + null, + "" + ]` + expect = CompactJSONString(expect) + js, err := ht.MarshalJSON() + if nil != err { + t.Fatal(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestTxnTraceSegmentThreshold(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + tr.TxnTrace.StackTraceThreshold = 1 * time.Hour + tr.TxnTrace.SegmentThreshold = 7 * time.Second + tr.TxnTrace.maxNodes = 5 + + durations := []int{5, 4, 6, 3, 7, 2, 8, 1, 9} + now := start + for _, d := range durations { + s := StartSegment(tr, now) + now = now.Add(time.Duration(d) * time.Second) + EndBasicSegment(tr, s, now, strconv.Itoa(d)) + } 
+ + acfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(acfg) + + ht := HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 123 * time.Second, + FinalName: "WebTransaction/Go/hello", + CleanURL: "/url", + Attrs: attr, + }, + Trace: tr.TxnTrace, + } + + expect := `[ + 1417136460000000, + 123000, + "WebTransaction/Go/hello", + "/url", + [ + 0, + {}, + {}, + [ + 0, + 123000, + "ROOT", + {}, + [ + [ + 0, + 123000, + "WebTransaction/Go/hello", + {}, + [ + [ + 18000, + 25000, + "Custom/7", + {}, + [] + ], + [ + 27000, + 35000, + "Custom/8", + {}, + [] + ], + [ + 36000, + 45000, + "Custom/9", + {}, + [] + ] + ] + ] + ] + ], + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{} + } + ], + "", + null, + false, + null, + "" + ]` + expect = CompactJSONString(expect) + js, err := ht.MarshalJSON() + if nil != err { + t.Fatal(err) + } + if string(js) != expect { + t.Error(string(js), expect) + } +} + +func TestEmptyHarvestTraces(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + ht := newHarvestTraces() + js, err := ht.Data("12345", start) + if nil != err || nil != js { + t.Error(string(js), err) + } +} + +func TestLongestTraceSaved(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + + acfg := CreateAttributeConfig(sampleAttributeConfigInput) + attr := NewAttributes(acfg) + ht := newHarvestTraces() + + ht.Witness(HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 3 * time.Second, + FinalName: "WebTransaction/Go/3", + CleanURL: "/url/3", + Attrs: attr, + }, + Trace: tr.TxnTrace, + }) + ht.Witness(HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 5 * time.Second, + FinalName: "WebTransaction/Go/5", + CleanURL: "/url/5", + Attrs: attr, + }, + Trace: tr.TxnTrace, + }) + ht.Witness(HarvestTrace{ + TxnEvent: TxnEvent{ + Start: start, + Duration: 4 * time.Second, + FinalName: 
"WebTransaction/Go/4", + CleanURL: "/url/4", + Attrs: attr, + }, + Trace: tr.TxnTrace, + }) + + expect := CompactJSONString(` +[ + "12345", + [ + [ + 1417136460000000,5000,"WebTransaction/Go/5","/url/5", + [ + 0,{},{}, + [0,5000,"ROOT",{}, + [[0,5000,"WebTransaction/Go/5",{},[]]] + ], + { + "agentAttributes":{}, + "userAttributes":{}, + "intrinsics":{} + } + ], + "",null,false,null,"" + ] + ] +]`) + js, err := ht.Data("12345", start) + if nil != err || string(js) != expect { + t.Error(err, string(js), expect) + } +} + +func TestTxnTraceStackTraceThreshold(t *testing.T) { + start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC) + tr := &TxnData{} + tr.TxnTrace.Enabled = true + tr.TxnTrace.StackTraceThreshold = 2 * time.Second + tr.TxnTrace.SegmentThreshold = 0 + tr.TxnTrace.maxNodes = 5 + + // below stack trace threshold + t1 := StartSegment(tr, start.Add(1*time.Second)) + EndBasicSegment(tr, t1, start.Add(2*time.Second), "t1") + + // not above stack trace threshold w/out params + t2 := StartSegment(tr, start.Add(2*time.Second)) + EndDatastoreSegment(EndDatastoreParams{ + Tracer: tr, + Start: t2, + Now: start.Add(4 * time.Second), + Product: "MySQL", + Collection: "my_table", + Operation: "SELECT", + }) + + // node above stack trace threshold w/ params + t3 := StartSegment(tr, start.Add(4*time.Second)) + EndExternalSegment(tr, t3, start.Add(6*time.Second), parseURL("http://example.com/zip/zap?secret=shhh")) + + p := tr.TxnTrace.nodes[0].params + if nil != p { + t.Error(p) + } + p = tr.TxnTrace.nodes[1].params + if nil == p || nil == p.StackTrace || "" != p.CleanURL { + t.Error(p) + } + p = tr.TxnTrace.nodes[2].params + if nil == p || nil == p.StackTrace || "http://example.com/zip/zap" != p.CleanURL { + t.Error(p) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/url.go b/vendor/github.com/newrelic/go-agent/internal/url.go new file mode 100644 index 00000000..21976ee4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/url.go @@ 
-0,0 +1,43 @@ +package internal + +import "net/url" + +// SafeURL removes sensitive information from a URL. +func SafeURL(u *url.URL) string { + if nil == u { + return "" + } + if "" != u.Opaque { + // If the URL is opaque, we cannot be sure if it contains + // sensitive information. + return "" + } + + // Omit user, query, and fragment information for security. + ur := url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: u.Path, + } + return ur.String() +} + +// SafeURLFromString removes sensitive information from a URL. +func SafeURLFromString(rawurl string) string { + u, err := url.Parse(rawurl) + if nil != err { + return "" + } + return SafeURL(u) +} + +// HostFromURL returns the URL's host. +func HostFromURL(u *url.URL) string { + if nil == u { + return "" + } + if "" != u.Opaque { + return "opaque" + } + return u.Host +} diff --git a/vendor/github.com/newrelic/go-agent/internal/url_test.go b/vendor/github.com/newrelic/go-agent/internal/url_test.go new file mode 100644 index 00000000..3b1c8b08 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/url_test.go @@ -0,0 +1,85 @@ +package internal + +import ( + "net/url" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestSafeURLNil(t *testing.T) { + if out := SafeURL(nil); "" != out { + t.Error(out) + } +} + +func TestSafeURL(t *testing.T) { + var testcases []struct { + Testname string `json:"testname"` + Expect string `json:"expected"` + Input string `json:"input"` + } + + err := crossagent.ReadJSON("url_clean.json", &testcases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testcases { + if strings.Contains(tc.Input, ";") { + // This test case was over defensive: + // http://www.ietf.org/rfc/rfc3986.txt + continue + } + + // Only use testcases which have a scheme, otherwise the urls + // may not be valid and may not be correctly handled by + // url.Parse. 
+ if strings.HasPrefix(tc.Input, "p:") { + u, err := url.Parse(tc.Input) + if nil != err { + t.Error(tc.Testname, tc.Input, err) + continue + } + out := SafeURL(u) + if out != tc.Expect { + t.Error(tc.Testname, tc.Input, tc.Expect) + } + } + } +} + +func TestSafeURLFromString(t *testing.T) { + out := SafeURLFromString(`http://localhost:8000/hello?zip=zap`) + if `http://localhost:8000/hello` != out { + t.Error(out) + } + out = SafeURLFromString("?????") + if "" != out { + t.Error(out) + } +} + +func TestHostFromURL(t *testing.T) { + u, err := url.Parse("http://example.com/zip/zap?secret=shh") + if nil != err { + t.Fatal(err) + } + host := HostFromURL(u) + if host != "example.com" { + t.Error(host) + } + host = HostFromURL(nil) + if host != "" { + t.Error(host) + } + u, err = url.Parse("scheme:opaque") + if nil != err { + t.Fatal(err) + } + host = HostFromURL(u) + if host != "opaque" { + t.Error(host) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilities.go b/vendor/github.com/newrelic/go-agent/internal/utilities.go new file mode 100644 index 00000000..12674187 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilities.go @@ -0,0 +1,80 @@ +package internal + +import ( + "bytes" + "encoding/json" + "strings" + "time" +) + +// JSONString assists in logging JSON: Based on the formatter used to log +// Context contents, the contents could be marshalled as JSON or just printed +// directly. +type JSONString string + +// MarshalJSON returns the JSONString unmodified without any escaping. 
+func (js JSONString) MarshalJSON() ([]byte, error) { + if "" == js { + return []byte("null"), nil + } + return []byte(js), nil +} + +func removeFirstSegment(name string) string { + idx := strings.Index(name, "/") + if -1 == idx { + return name + } + return name[idx+1:] +} + +func timeToFloatSeconds(t time.Time) float64 { + return float64(t.UnixNano()) / float64(1000*1000*1000) +} + +func timeToFloatMilliseconds(t time.Time) float64 { + return float64(t.UnixNano()) / float64(1000*1000) +} + +func floatSecondsToDuration(seconds float64) time.Duration { + nanos := seconds * 1000 * 1000 * 1000 + return time.Duration(nanos) * time.Nanosecond +} + +func absTimeDiff(t1, t2 time.Time) time.Duration { + if t1.After(t2) { + return t1.Sub(t2) + } + return t2.Sub(t1) +} + +func compactJSON(js []byte) []byte { + buf := new(bytes.Buffer) + if err := json.Compact(buf, js); err != nil { + return nil + } + return buf.Bytes() +} + +// CompactJSONString removes the whitespace from a JSON string. +func CompactJSONString(js string) string { + out := compactJSON([]byte(js)) + return string(out) +} + +// StringLengthByteLimit truncates strings using a byte-limit boundary and +// avoids terminating in the middle of a multibyte character. 
+func StringLengthByteLimit(str string, byteLimit int) string { + if len(str) <= byteLimit { + return str + } + + limitIndex := 0 + for pos := range str { + if pos > byteLimit { + break + } + limitIndex = pos + } + return str[0:limitIndex] +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilities_test.go b/vendor/github.com/newrelic/go-agent/internal/utilities_test.go new file mode 100644 index 00000000..70490edf --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilities_test.go @@ -0,0 +1,99 @@ +package internal + +import ( + "testing" + "time" +) + +func TestRemoveFirstSegment(t *testing.T) { + testcases := []struct { + input string + expected string + }{ + {input: "no_seperators", expected: "no_seperators"}, + {input: "heyo/zip/zap", expected: "zip/zap"}, + {input: "ends_in_slash/", expected: ""}, + {input: "☃☃☃/✓✓✓/heyo", expected: "✓✓✓/heyo"}, + {input: "☃☃☃/", expected: ""}, + {input: "/", expected: ""}, + {input: "", expected: ""}, + } + + for _, tc := range testcases { + out := removeFirstSegment(tc.input) + if out != tc.expected { + t.Fatal(tc.input, out, tc.expected) + } + } +} + +func TestFloatSecondsToDuration(t *testing.T) { + if d := floatSecondsToDuration(0.123); d != 123*time.Millisecond { + t.Error(d) + } + if d := floatSecondsToDuration(456.0); d != 456*time.Second { + t.Error(d) + } +} + +func TestAbsTimeDiff(t *testing.T) { + diff := 5 * time.Second + before := time.Now() + after := before.Add(5 * time.Second) + + if out := absTimeDiff(before, after); out != diff { + t.Error(out, diff) + } + if out := absTimeDiff(after, before); out != diff { + t.Error(out, diff) + } + if out := absTimeDiff(after, after); out != 0 { + t.Error(out) + } +} + +func TestTimeToFloatMilliseconds(t *testing.T) { + tm := time.Unix(123, 456789000) + if ms := timeToFloatMilliseconds(tm); ms != 123456.789 { + t.Error(ms) + } +} + +func TestCompactJSON(t *testing.T) { + in := ` + { "zip": 1}` + out := CompactJSONString(in) + if out != 
`{"zip":1}` { + t.Fatal(in, out) + } +} + +func TestStringLengthByteLimit(t *testing.T) { + testcases := []struct { + input string + limit int + expect string + }{ + {"", 255, ""}, + {"awesome", -1, ""}, + {"awesome", 0, ""}, + {"awesome", 1, "a"}, + {"awesome", 7, "awesome"}, + {"awesome", 20, "awesome"}, + {"日本\x80語", 10, "日本\x80語"}, // bad unicode + {"日本", 1, ""}, + {"日本", 2, ""}, + {"日本", 3, "日"}, + {"日本", 4, "日"}, + {"日本", 5, "日"}, + {"日本", 6, "日本"}, + {"日本", 7, "日本"}, + } + + for _, tc := range testcases { + out := StringLengthByteLimit(tc.input, tc.limit) + if out != tc.expect { + t.Error(tc.input, tc.limit, tc.expect, out) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go b/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go new file mode 100644 index 00000000..2a557ceb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/aws.go @@ -0,0 +1,121 @@ +package utilization + +import ( + "fmt" + "io" + "net/http" + "time" +) + +const ( + maxResponseLengthBytes = 255 + + // AWS data gathering requires making three web requests, therefore this + // timeout is in keeping with the spec's total timeout of 1 second. + individualConnectionTimeout = 300 * time.Millisecond +) + +const ( + awsHost = "169.254.169.254" + + typeEndpointPath = "/2008-02-01/meta-data/instance-type" + idEndpointPath = "/2008-02-01/meta-data/instance-id" + zoneEndpointPath = "/2008-02-01/meta-data/placement/availability-zone" + + typeEndpoint = "http://" + awsHost + typeEndpointPath + idEndpoint = "http://" + awsHost + idEndpointPath + zoneEndpoint = "http://" + awsHost + zoneEndpointPath +) + +// awsValidationError represents a response from an AWS endpoint that doesn't +// match the format expectations. 
+type awsValidationError struct { + e error +} + +func (a awsValidationError) Error() string { + return a.e.Error() +} + +func isAWSValidationError(e error) bool { + _, is := e.(awsValidationError) + return is +} + +func getAWS() (*vendor, error) { + return getEndpoints(&http.Client{ + Timeout: individualConnectionTimeout, + }) +} + +func getEndpoints(client *http.Client) (*vendor, error) { + v := &vendor{} + var err error + + v.ID, err = getAndValidate(client, idEndpoint) + if err != nil { + return nil, err + } + v.Type, err = getAndValidate(client, typeEndpoint) + if err != nil { + return nil, err + } + v.Zone, err = getAndValidate(client, zoneEndpoint) + if err != nil { + return nil, err + } + + return v, nil +} + +func getAndValidate(client *http.Client, endpoint string) (string, error) { + response, err := client.Get(endpoint) + if err != nil { + return "", err + } + defer response.Body.Close() + + if response.StatusCode != 200 { + return "", fmt.Errorf("unexpected response code %d", response.StatusCode) + } + + b := make([]byte, maxResponseLengthBytes+1) + num, err := response.Body.Read(b) + if err != nil && err != io.EOF { + return "", err + } + + if num > maxResponseLengthBytes { + return "", awsValidationError{ + fmt.Errorf("maximum length %d exceeded", maxResponseLengthBytes), + } + } + + responseText := string(b[:num]) + + for _, r := range responseText { + if !isAcceptableRune(r) { + return "", awsValidationError{ + fmt.Errorf("invalid character %x", r), + } + } + } + + return responseText, nil +} + +// See: +// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md#normalizing-aws-data +func isAcceptableRune(r rune) bool { + switch r { + case 0xFFFD: + return false + case '_', ' ', '/', '.', '-': + return true + default: + return r > 0x7f || + ('0' <= r && r <= '9') || + ('a' <= r && r <= 'z') || + ('A' <= r && r <= 'Z') + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/aws_test.go 
b/vendor/github.com/newrelic/go-agent/internal/utilization/aws_test.go new file mode 100644 index 00000000..d878d9dc --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/aws_test.go @@ -0,0 +1,145 @@ +package utilization + +import ( + "errors" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal/crossagent" +) + +type maybeResponse struct { + Response string `json:"response"` + Timeout bool `json:"timeout"` +} + +type mockTransport struct { + ID, Type, Zone maybeResponse + reader *strings.Reader + closed int +} + +func (m *mockTransport) RoundTrip(r *http.Request) (*http.Response, error) { + var response maybeResponse + + if r.URL.Host != awsHost { + return nil, fmt.Errorf("invalid endpoint host %s", r.URL.Host) + } + + switch r.URL.Path { + case typeEndpointPath: + response = m.Type + case zoneEndpointPath: + response = m.Zone + case idEndpointPath: + response = m.ID + default: + return nil, fmt.Errorf("invalid endpoint %s", r.URL.Path) + } + + if response.Timeout { + return nil, errors.New("timed out") + } + + m.reader = strings.NewReader(response.Response) + + return &http.Response{ + StatusCode: 200, + Body: m, + }, nil +} + +func (m *mockTransport) CancelRequest(req *http.Request) { +} + +func (m *mockTransport) Read(b []byte) (int, error) { + return m.reader.Read(b) +} + +func (m *mockTransport) Close() error { + m.closed++ + m.reader = nil + return nil +} + +func TestGetAndValidateStatus(t *testing.T) { + transport := &mockTransport{Type: maybeResponse{Response: "e2-micro"}} + client := &http.Client{Transport: transport} + resp, err := getAndValidate(client, typeEndpoint) + if err != nil || resp != "e2-micro" { + t.Error(err, resp) + } + if 1 != transport.closed { + t.Error("response body not closed") + } + + transport = &mockTransport{Type: maybeResponse{Response: "e2,micro"}} + client = &http.Client{Transport: transport} + _, err = getAndValidate(client, typeEndpoint) + if err == nil || 
!isAWSValidationError(err) { + t.Error(err) + } + if 1 != transport.closed { + t.Error("response body not closed") + } +} + +func TestCrossagentAWS(t *testing.T) { + var testCases []struct { + Name string `json:"testname"` + URIs struct { + Type maybeResponse `json:"http://169.254.169.254/2008-02-01/meta-data/instance-type"` + ID maybeResponse `json:"http://169.254.169.254/2008-02-01/meta-data/instance-id"` + Zone maybeResponse `json:"http://169.254.169.254/2008-02-01/meta-data/placement/availability-zone"` + } `json:"uris"` + Vendors vendors `json:"expected_vendors_hash"` + Metrics struct { + Supportability struct { + CallCount int `json:"call_count"` + } `json:"Supportability/utilization/aws/error"` + } `json:"expected_metrics"` + } + + err := crossagent.ReadJSON("aws.json", &testCases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testCases { + client := &http.Client{ + Transport: &mockTransport{ + ID: tc.URIs.ID, + Type: tc.URIs.Type, + Zone: tc.URIs.Zone, + }, + } + + v, err := getEndpoints(client) + + expectInvalid := tc.Metrics.Supportability.CallCount > 0 + if expectInvalid != isAWSValidationError(err) { + t.Error(tc.Name, err, expectInvalid, isAWSValidationError(err)) + } + + expectTimeout := tc.URIs.Type.Timeout || tc.URIs.ID.Timeout || tc.URIs.Zone.Timeout + if expectTimeout && nil == err { + t.Error(tc.Name, err) + } + + if tc.Vendors.AWS != nil { + if nil == v { + t.Error(tc.Name, "missing vendor") + } else if v.ID != tc.Vendors.AWS.ID { + t.Error(tc.Name, "Id mismatch", v.ID, tc.Vendors.AWS.ID) + } else if v.Type != tc.Vendors.AWS.Type { + t.Error(tc.Name, "Type mismatch", v.Type, tc.Vendors.AWS.Type) + } else if v.Zone != tc.Vendors.AWS.Zone { + t.Error(tc.Name, "Zone mismatch", v.Zone, tc.Vendors.AWS.Zone) + } + } else if nil != v { + t.Error(tc.Name, "unexpected vendor") + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go 
b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go new file mode 100644 index 00000000..83d12f87 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go @@ -0,0 +1,140 @@ +// Package utilization implements the Utilization spec, available at +// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md +package utilization + +import ( + "runtime" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +const metadataVersion = 2 + +// Config controls the behavior of utilization information capture. +type Config struct { + DetectAWS bool + DetectDocker bool + LogicalProcessors int + TotalRAMMIB int + BillingHostname string +} + +type override struct { + LogicalProcessors *int `json:"logical_processors,omitempty"` + TotalRAMMIB *int `json:"total_ram_mib,omitempty"` + BillingHostname string `json:"hostname,omitempty"` +} + +// Data contains utilization system information. +type Data struct { + MetadataVersion int `json:"metadata_version"` + LogicalProcessors int `json:"logical_processors"` + RAMMib *uint64 `json:"total_ram_mib"` + Hostname string `json:"hostname"` + Vendors *vendors `json:"vendors,omitempty"` + Config *override `json:"config,omitempty"` +} + +var ( + sampleRAMMib = uint64(1024) + // SampleData contains sample utilization data useful for testing. 
+ SampleData = Data{ + MetadataVersion: metadataVersion, + LogicalProcessors: 16, + RAMMib: &sampleRAMMib, + Hostname: "my-hostname", + } +) + +type vendor struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Zone string `json:"zone,omitempty"` +} + +type vendors struct { + AWS *vendor `json:"aws,omitempty"` + Docker *vendor `json:"docker,omitempty"` +} + +func overrideFromConfig(config Config) *override { + ov := &override{} + + if 0 != config.LogicalProcessors { + x := config.LogicalProcessors + ov.LogicalProcessors = &x + } + if 0 != config.TotalRAMMIB { + x := config.TotalRAMMIB + ov.TotalRAMMIB = &x + } + ov.BillingHostname = config.BillingHostname + + if "" == ov.BillingHostname && + nil == ov.LogicalProcessors && + nil == ov.TotalRAMMIB { + ov = nil + } + return ov +} + +// Gather gathers system utilization data. +func Gather(config Config, lg logger.Logger) *Data { + uDat := Data{ + MetadataVersion: metadataVersion, + Vendors: &vendors{}, + LogicalProcessors: runtime.NumCPU(), + } + + if config.DetectDocker { + id, err := sysinfo.DockerID() + if err != nil && + err != sysinfo.ErrDockerUnsupported && + err != sysinfo.ErrDockerNotFound { + lg.Warn("error gathering Docker information", map[string]interface{}{ + "error": err.Error(), + }) + } else if id != "" { + uDat.Vendors.Docker = &vendor{ID: id} + } + } + + if config.DetectAWS { + aws, err := getAWS() + if nil == err { + uDat.Vendors.AWS = aws + } else if isAWSValidationError(err) { + lg.Warn("AWS validation error", map[string]interface{}{ + "error": err.Error(), + }) + } + } + + if uDat.Vendors.AWS == nil && uDat.Vendors.Docker == nil { + uDat.Vendors = nil + } + + host, err := sysinfo.Hostname() + if nil == err { + uDat.Hostname = host + } else { + lg.Warn("error getting hostname", map[string]interface{}{ + "error": err.Error(), + }) + } + + bts, err := sysinfo.PhysicalMemoryBytes() + if nil == err { + mib := sysinfo.BytesToMebibytes(bts) + uDat.RAMMib = &mib + } else { + 
lg.Warn("error getting memory", map[string]interface{}{ + "error": err.Error(), + }) + } + + uDat.Config = overrideFromConfig(config) + + return &uDat +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/utilization_test.go b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization_test.go new file mode 100644 index 00000000..c2640aa4 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization_test.go @@ -0,0 +1,135 @@ +package utilization + +import ( + "encoding/json" + "testing" + + "github.com/newrelic/go-agent/internal/logger" +) + +func TestJSONMarshalling(t *testing.T) { + ramMib := uint64(1024) + processors := 16 + u := Data{ + MetadataVersion: metadataVersion, + LogicalProcessors: 4, + RAMMib: &ramMib, + Hostname: "localhost", + Vendors: &vendors{ + AWS: &vendor{ + ID: "8BADFOOD", + Type: "t2.micro", + Zone: "us-west-1", + }, + Docker: &vendor{ID: "47cbd16b77c50cbf71401"}, + }, + Config: &override{ + LogicalProcessors: &processors, + }, + } + + expect := `{ + "metadata_version": 2, + "logical_processors": 4, + "total_ram_mib": 1024, + "hostname": "localhost", + "vendors": { + "aws": { + "id": "8BADFOOD", + "type": "t2.micro", + "zone": "us-west-1" + }, + "docker": { + "id": "47cbd16b77c50cbf71401" + } + }, + "config": { + "logical_processors": 16 + } +}` + + j, err := json.MarshalIndent(u, "", "\t") + if err != nil { + t.Error(err) + } + if string(j) != expect { + t.Error(string(j), expect) + } + + // Test that we marshal not-present values to nil. 
+ u.RAMMib = nil + u.Hostname = "" + u.Config = nil + expect = `{ + "metadata_version": 2, + "logical_processors": 4, + "total_ram_mib": null, + "hostname": "", + "vendors": { + "aws": { + "id": "8BADFOOD", + "type": "t2.micro", + "zone": "us-west-1" + }, + "docker": { + "id": "47cbd16b77c50cbf71401" + } + } +}` + + j, err = json.MarshalIndent(u, "", "\t") + if err != nil { + t.Error(err) + } + if string(j) != expect { + t.Error(string(j), expect) + } +} + +func TestUtilizationHash(t *testing.T) { + config := []Config{ + {DetectAWS: true, DetectDocker: true}, + {DetectAWS: false, DetectDocker: false}, + } + for _, c := range config { + u := Gather(c, logger.ShimLogger{}) + js, err := json.Marshal(u) + if err != nil { + t.Error(err) + } + if u.MetadataVersion == 0 || u.LogicalProcessors == 0 || + u.RAMMib == nil || *u.RAMMib == 0 || + u.Hostname == "" { + t.Fatal(u, string(js)) + } + } +} + +func TestOverrideFromConfig(t *testing.T) { + testcases := []struct { + config Config + expect string + }{ + {Config{}, `null`}, + {Config{LogicalProcessors: 16}, `{"logical_processors":16}`}, + {Config{TotalRAMMIB: 1024}, `{"total_ram_mib":1024}`}, + {Config{BillingHostname: "localhost"}, `{"hostname":"localhost"}`}, + {Config{ + LogicalProcessors: 16, + TotalRAMMIB: 1024, + BillingHostname: "localhost", + }, `{"logical_processors":16,"total_ram_mib":1024,"hostname":"localhost"}`}, + } + + for _, tc := range testcases { + ov := overrideFromConfig(tc.config) + js, err := json.Marshal(ov) + if nil != err { + t.Error(tc.expect, err) + continue + } + if string(js) != tc.expect { + t.Error(tc.expect, string(js)) + } + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_app.go b/vendor/github.com/newrelic/go-agent/internal_app.go new file mode 100644 index 00000000..3ad5a0f1 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_app.go @@ -0,0 +1,565 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "os" + "strings" + "sync" + "time" + + 
"github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" +) + +var ( + debugLogging = os.Getenv("NEW_RELIC_DEBUG_LOGGING") + redirectHost = func() string { + if s := os.Getenv("NEW_RELIC_HOST"); "" != s { + return s + } + return "collector.newrelic.com" + }() +) + +type dataConsumer interface { + Consume(internal.AgentRunID, internal.Harvestable) +} + +type appData struct { + id internal.AgentRunID + data internal.Harvestable +} + +type app struct { + config Config + attrConfig *internal.AttributeConfig + rpmControls internal.RpmControls + testHarvest *internal.Harvest + + // initiateShutdown is used to tell the processor to shutdown. + initiateShutdown chan struct{} + + // shutdownStarted and shutdownComplete are closed by the processor + // goroutine to indicate the shutdown status. Two channels are used so + // that the call of app.Shutdown() can block until shutdown has + // completed but other goroutines can exit when shutdown has started. + // This is not just an optimization: This prevents a deadlock if + // harvesting data during the shutdown fails and an attempt is made to + // merge the data into the next harvest. + shutdownStarted chan struct{} + shutdownComplete chan struct{} + + // Sends to these channels should not occur without a <-shutdownStarted + // select option to prevent deadlock. + dataChan chan appData + collectorErrorChan chan error + connectChan chan *internal.AppRun + + harvestTicker *time.Ticker + + // This mutex protects both `run` and `err`, both of which should only + // be accessed using getState and setState. + sync.RWMutex + // run is non-nil when the app is successfully connected. It is + // immutable. + run *internal.AppRun + // err is non-nil if the application will never be connected again + // (disconnect, license exception, shutdown). 
+ err error +} + +var ( + placeholderRun = &internal.AppRun{ + ConnectReply: internal.ConnectReplyDefaults(), + } +) + +func isFatalHarvestError(e error) bool { + return internal.IsDisconnect(e) || + internal.IsLicenseException(e) || + internal.IsRestartException(e) +} + +func shouldSaveFailedHarvest(e error) bool { + if e == internal.ErrPayloadTooLarge || e == internal.ErrUnsupportedMedia { + return false + } + return true +} + +func (app *app) doHarvest(h *internal.Harvest, harvestStart time.Time, run *internal.AppRun) { + h.CreateFinalMetrics() + h.Metrics = h.Metrics.ApplyRules(run.MetricRules) + + payloads := h.Payloads() + for cmd, p := range payloads { + + data, err := p.Data(run.RunID.String(), harvestStart) + + if nil == data && nil == err { + continue + } + + if nil == err { + call := internal.RpmCmd{ + Collector: run.Collector, + RunID: run.RunID.String(), + Name: cmd, + Data: data, + } + + // The reply from harvest calls is always unused. + _, err = internal.CollectorRequest(call, app.rpmControls) + } + + if nil == err { + continue + } + + if isFatalHarvestError(err) { + select { + case app.collectorErrorChan <- err: + case <-app.shutdownStarted: + } + return + } + + app.config.Logger.Warn("harvest failure", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + + if shouldSaveFailedHarvest(err) { + app.Consume(run.RunID, p) + } + } +} + +func connectAttempt(app *app) (*internal.AppRun, error) { + js, e := configConnectJSON(app.config) + if nil != e { + return nil, e + } + return internal.ConnectAttempt(js, redirectHost, app.rpmControls) +} + +func (app *app) connectRoutine() { + for { + run, err := connectAttempt(app) + if nil == err { + select { + case app.connectChan <- run: + case <-app.shutdownStarted: + } + return + } + + if internal.IsDisconnect(err) || internal.IsLicenseException(err) { + select { + case app.collectorErrorChan <- err: + case <-app.shutdownStarted: + } + return + } + + app.config.Logger.Warn("application connect 
failure", map[string]interface{}{ + "error": err.Error(), + }) + + time.Sleep(internal.ConnectBackoff) + } +} + +func debug(data internal.Harvestable, lg Logger) { + now := time.Now() + h := internal.NewHarvest(now) + data.MergeIntoHarvest(h) + ps := h.Payloads() + for cmd, p := range ps { + d, err := p.Data("agent run id", now) + if nil == d && nil == err { + continue + } + if nil != err { + lg.Debug("integration", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + continue + } + lg.Debug("integration", map[string]interface{}{ + "cmd": cmd, + "data": internal.JSONString(d), + }) + } +} + +func processConnectMessages(run *internal.AppRun, lg Logger) { + for _, msg := range run.Messages { + event := "collector message" + cn := map[string]interface{}{"msg": msg.Message} + + switch strings.ToLower(msg.Level) { + case "error": + lg.Error(event, cn) + case "warn": + lg.Warn(event, cn) + case "info": + lg.Info(event, cn) + case "debug", "verbose": + lg.Debug(event, cn) + } + } +} + +func (app *app) process() { + // Both the harvest and the run are non-nil when the app is connected, + // and nil otherwise. + var h *internal.Harvest + var run *internal.AppRun + + for { + select { + case <-app.harvestTicker.C: + if nil != run { + now := time.Now() + go app.doHarvest(h, now, run) + h = internal.NewHarvest(now) + } + case d := <-app.dataChan: + if nil != run && run.RunID == d.id { + d.data.MergeIntoHarvest(h) + } + case <-app.initiateShutdown: + close(app.shutdownStarted) + + // Remove the run before merging any final data to + // ensure a bounded number of receives from dataChan. 
+ app.setState(nil, errors.New("application shut down")) + app.harvestTicker.Stop() + + if nil != run { + for done := false; !done; { + select { + case d := <-app.dataChan: + if run.RunID == d.id { + d.data.MergeIntoHarvest(h) + } + default: + done = true + } + } + app.doHarvest(h, time.Now(), run) + } + + close(app.shutdownComplete) + return + case err := <-app.collectorErrorChan: + run = nil + h = nil + app.setState(nil, nil) + + switch { + case internal.IsDisconnect(err): + app.setState(nil, err) + app.config.Logger.Error("application disconnected by New Relic", map[string]interface{}{ + "app": app.config.AppName, + }) + case internal.IsLicenseException(err): + app.setState(nil, err) + app.config.Logger.Error("invalid license", map[string]interface{}{ + "app": app.config.AppName, + "license": app.config.License, + }) + case internal.IsRestartException(err): + app.config.Logger.Info("application restarted", map[string]interface{}{ + "app": app.config.AppName, + }) + go app.connectRoutine() + } + case run = <-app.connectChan: + h = internal.NewHarvest(time.Now()) + app.setState(run, nil) + + app.config.Logger.Info("application connected", map[string]interface{}{ + "app": app.config.AppName, + "run": run.RunID.String(), + }) + processConnectMessages(run, app.config.Logger) + } + } +} + +func (app *app) Shutdown(timeout time.Duration) { + if !app.config.Enabled { + return + } + + select { + case app.initiateShutdown <- struct{}{}: + default: + } + + // Block until shutdown is done or timeout occurs. 
+ t := time.NewTimer(timeout) + select { + case <-app.shutdownComplete: + case <-t.C: + } + t.Stop() + + app.config.Logger.Info("application shutdown", map[string]interface{}{ + "app": app.config.AppName, + }) +} + +func convertAttributeDestinationConfig(c AttributeDestinationConfig) internal.AttributeDestinationConfig { + return internal.AttributeDestinationConfig{ + Enabled: c.Enabled, + Include: c.Include, + Exclude: c.Exclude, + } +} + +func runSampler(app *app, period time.Duration) { + previous := internal.GetSample(time.Now(), app.config.Logger) + t := time.NewTicker(period) + for { + select { + case now := <-t.C: + current := internal.GetSample(now, app.config.Logger) + run, _ := app.getState() + app.Consume(run.RunID, internal.GetStats(internal.Samples{ + Previous: previous, + Current: current, + })) + previous = current + case <-app.shutdownStarted: + t.Stop() + return + } + } +} + +func (app *app) WaitForConnection(timeout time.Duration) error { + if !app.config.Enabled { + return nil + } + deadline := time.Now().Add(timeout) + pollPeriod := 50 * time.Millisecond + + for { + run, err := app.getState() + if nil != err { + return err + } + if run.RunID != "" { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("timeout out after %s", timeout.String()) + } + time.Sleep(pollPeriod) + } +} + +func newApp(c Config) (Application, error) { + c = copyConfigReferenceFields(c) + if err := c.Validate(); nil != err { + return nil, err + } + if nil == c.Logger { + c.Logger = logger.ShimLogger{} + } + app := &app{ + config: c, + attrConfig: internal.CreateAttributeConfig(internal.AttributeConfigInput{ + Attributes: convertAttributeDestinationConfig(c.Attributes), + ErrorCollector: convertAttributeDestinationConfig(c.ErrorCollector.Attributes), + TransactionEvents: convertAttributeDestinationConfig(c.TransactionEvents.Attributes), + TransactionTracer: convertAttributeDestinationConfig(c.TransactionTracer.Attributes), + }), + + // This channel must be 
buffered since Shutdown makes a + // non-blocking send attempt. + initiateShutdown: make(chan struct{}, 1), + + shutdownStarted: make(chan struct{}), + shutdownComplete: make(chan struct{}), + connectChan: make(chan *internal.AppRun, 1), + collectorErrorChan: make(chan error, 1), + dataChan: make(chan appData, internal.AppDataChanSize), + rpmControls: internal.RpmControls{ + UseTLS: c.UseTLS, + License: c.License, + Client: &http.Client{ + Transport: c.Transport, + Timeout: internal.CollectorTimeout, + }, + Logger: c.Logger, + AgentVersion: Version, + }, + } + + app.config.Logger.Info("application created", map[string]interface{}{ + "app": app.config.AppName, + "version": Version, + "enabled": app.config.Enabled, + }) + + if !app.config.Enabled { + return app, nil + } + + app.harvestTicker = time.NewTicker(internal.HarvestPeriod) + + go app.process() + go app.connectRoutine() + + if app.config.RuntimeSampler.Enabled { + go runSampler(app, internal.RuntimeSamplerPeriod) + } + + return app, nil +} + +type expectApp interface { + internal.Expect + Application +} + +func newTestApp(replyfn func(*internal.ConnectReply), cfg Config) (expectApp, error) { + cfg.Enabled = false + application, err := newApp(cfg) + if nil != err { + return nil, err + } + app := application.(*app) + if nil != replyfn { + reply := internal.ConnectReplyDefaults() + replyfn(reply) + app.setState(&internal.AppRun{ConnectReply: reply}, nil) + } + + app.testHarvest = internal.NewHarvest(time.Now()) + + return app, nil +} + +func (app *app) getState() (*internal.AppRun, error) { + app.RLock() + defer app.RUnlock() + + run := app.run + if nil == run { + run = placeholderRun + } + return run, app.err +} + +func (app *app) setState(run *internal.AppRun, err error) { + app.Lock() + defer app.Unlock() + + app.run = run + app.err = err +} + +// StartTransaction implements newrelic.Application's StartTransaction. 
+func (app *app) StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction { + run, _ := app.getState() + return upgradeTxn(newTxn(txnInput{ + Config: app.config, + Reply: run.ConnectReply, + W: w, + Consumer: app, + attrConfig: app.attrConfig, + }, r, name)) +} + +var ( + errHighSecurityEnabled = errors.New("high security enabled") + errCustomEventsDisabled = errors.New("custom events disabled") + errCustomEventsRemoteDisabled = errors.New("custom events disabled by server") +) + +// RecordCustomEvent implements newrelic.Application's RecordCustomEvent. +func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error { + if app.config.HighSecurity { + return errHighSecurityEnabled + } + + if !app.config.CustomInsightsEvents.Enabled { + return errCustomEventsDisabled + } + + event, e := internal.CreateCustomEvent(eventType, params, time.Now()) + if nil != e { + return e + } + + run, _ := app.getState() + if !run.CollectCustomEvents { + return errCustomEventsRemoteDisabled + } + + app.Consume(run.RunID, event) + + return nil +} + +func (app *app) Consume(id internal.AgentRunID, data internal.Harvestable) { + if "" != debugLogging { + debug(data, app.config.Logger) + } + + if nil != app.testHarvest { + data.MergeIntoHarvest(app.testHarvest) + return + } + + if "" == id { + return + } + + select { + case app.dataChan <- appData{id, data}: + case <-app.shutdownStarted: + } +} + +func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantEvent) { + internal.ExpectCustomEvents(internal.ExtendValidator(t, "custom events"), app.testHarvest.CustomEvents, want) +} + +func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) { + t = internal.ExtendValidator(t, "traced errors") + internal.ExpectErrors(t, app.testHarvest.ErrorTraces, want) +} + +func (app *app) ExpectErrorEvents(t internal.Validator, want []internal.WantEvent) { + t = internal.ExtendValidator(t, "error events") + 
internal.ExpectErrorEvents(t, app.testHarvest.ErrorEvents, want) +} + +func (app *app) ExpectTxnEvents(t internal.Validator, want []internal.WantEvent) { + t = internal.ExtendValidator(t, "txn events") + internal.ExpectTxnEvents(t, app.testHarvest.TxnEvents, want) +} + +func (app *app) ExpectMetrics(t internal.Validator, want []internal.WantMetric) { + t = internal.ExtendValidator(t, "metrics") + internal.ExpectMetrics(t, app.testHarvest.Metrics, want) +} + +func (app *app) ExpectTxnTraces(t internal.Validator, want []internal.WantTxnTrace) { + t = internal.ExtendValidator(t, "txn traces") + internal.ExpectTxnTraces(t, app.testHarvest.TxnTraces, want) +} + +func (app *app) ExpectSlowQueries(t internal.Validator, want []internal.WantSlowQuery) { + t = internal.ExtendValidator(t, "slow queries") + internal.ExpectSlowQueries(t, app.testHarvest.SlowSQLs, want) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_attributes_test.go b/vendor/github.com/newrelic/go-agent/internal_attributes_test.go new file mode 100644 index 00000000..1337066d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_attributes_test.go @@ -0,0 +1,425 @@ +package newrelic + +import ( + "errors" + "net/http" + "testing" + + "github.com/newrelic/go-agent/internal" +) + +func TestUserAttributeBasics(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, nil) + + txn.NoticeError(errors.New("zap")) + + if err := txn.AddAttribute(`int\key`, 1); nil != err { + t.Error(err) + } + if err := txn.AddAttribute(`str\key`, `zip\zap`); nil != err { + t.Error(err) + } + err := txn.AddAttribute("invalid_value", struct{}{}) + if _, ok := err.(internal.ErrInvalidAttribute); !ok { + t.Error(err) + } + txn.End() + if err := txn.AddAttribute("already_ended", "zap"); err != errAlreadyEnded { + t.Error(err) + } + + 
agentAttributes := map[string]interface{}{} + userAttributes := map[string]interface{}{`int\key`: 1, `str\key`: `zip\zap`} + + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + }, + AgentAttributes: agentAttributes, + UserAttributes: userAttributes, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "zap", + Klass: "*errors.errorString", + Caller: "go-agent.TestUserAttributeBasics", + URL: "", + AgentAttributes: agentAttributes, + UserAttributes: userAttributes, + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "*errors.errorString", + "error.message": "zap", + "transactionName": "OtherTransaction/Go/hello", + }, + AgentAttributes: agentAttributes, + UserAttributes: userAttributes, + }}) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/hello", + CleanURL: "", + NumSegments: 0, + AgentAttributes: agentAttributes, + UserAttributes: userAttributes, + }}) +} + +func TestUserAttributeConfiguration(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionEvents.Attributes.Exclude = []string{"only_errors", "only_txn_traces"} + cfg.ErrorCollector.Attributes.Exclude = []string{"only_txn_events", "only_txn_traces"} + cfg.TransactionTracer.Attributes.Exclude = []string{"only_txn_events", "only_errors"} + cfg.Attributes.Exclude = []string{"completed_excluded"} + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, nil) + + txn.NoticeError(errors.New("zap")) + + if err := txn.AddAttribute("only_errors", 1); nil != err { + t.Error(err) + } + if err := txn.AddAttribute("only_txn_events", 2); nil != err { + t.Error(err) + } + if err := txn.AddAttribute("only_txn_traces", 3); nil != err { + t.Error(err) + } + if err := 
txn.AddAttribute("completed_excluded", 4); nil != err { + t.Error(err) + } + txn.End() + + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + }, + AgentAttributes: map[string]interface{}{}, + UserAttributes: map[string]interface{}{"only_txn_events": 2}, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "zap", + Klass: "*errors.errorString", + Caller: "go-agent.TestUserAttributeConfiguration", + URL: "", + AgentAttributes: map[string]interface{}{}, + UserAttributes: map[string]interface{}{"only_errors": 1}, + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "*errors.errorString", + "error.message": "zap", + "transactionName": "OtherTransaction/Go/hello", + }, + AgentAttributes: map[string]interface{}{}, + UserAttributes: map[string]interface{}{"only_errors": 1}, + }}) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "OtherTransaction/Go/hello", + CleanURL: "", + NumSegments: 0, + AgentAttributes: map[string]interface{}{}, + UserAttributes: map[string]interface{}{"only_txn_traces": 3}, + }}) +} + +// Second attributes have priority. +func mergeAttributes(a1, a2 map[string]interface{}) map[string]interface{} { + a := make(map[string]interface{}) + for k, v := range a1 { + a[k] = v + } + for k, v := range a2 { + a[k] = v + } + return a +} + +var ( + // Agent attributes expected in txn events from usualAttributeTestTransaction. 
+ agent1 = map[string]interface{}{ + AttributeHostDisplayName: `my\host\display\name`, + AttributeResponseCode: `404`, + AttributeResponseContentType: `text/plain; charset=us-ascii`, + AttributeResponseContentLength: 345, + AttributeRequestMethod: "GET", + AttributeRequestAccept: "text/plain", + AttributeRequestContentType: "text/html; charset=utf-8", + AttributeRequestContentLength: 753, + AttributeRequestHost: "my_domain.com", + } + // Agent attributes expected in errors and traces from usualAttributeTestTransaction. + agent2 = mergeAttributes(agent1, map[string]interface{}{ + AttributeRequestUserAgent: "Mozilla/5.0", + AttributeRequestReferer: "http://en.wikipedia.org/zip", + }) + // User attributes expected from usualAttributeTestTransaction. + user1 = map[string]interface{}{ + "myStr": "hello", + } +) + +func agentAttributeTestcase(t testing.TB, cfgfn func(cfg *Config), e AttributeExpect) { + app := testApp(nil, func(cfg *Config) { + cfg.HostDisplayName = `my\host\display\name` + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + if nil != cfgfn { + cfgfn(cfg) + } + }, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + txn.NoticeError(errors.New("zap")) + + hdr := txn.Header() + hdr.Set("Content-Type", `text/plain; charset=us-ascii`) + hdr.Set("Content-Length", `345`) + + txn.WriteHeader(404) + txn.AddAttribute("myStr", "hello") + + txn.End() + + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + AgentAttributes: e.TxnEvent.Agent, + UserAttributes: e.TxnEvent.User, + }}) + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/hello", + Msg: "zap", + Klass: "*errors.errorString", + Caller: "go-agent.agentAttributeTestcase", + URL: "/hello", + AgentAttributes: e.Error.Agent, + UserAttributes: e.Error.User, + }}) + app.ExpectErrorEvents(t, 
[]internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "*errors.errorString", + "error.message": "zap", + "transactionName": "WebTransaction/Go/hello", + }, + AgentAttributes: e.Error.Agent, + UserAttributes: e.Error.User, + }}) + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "WebTransaction/Go/hello", + CleanURL: "/hello", + NumSegments: 0, + AgentAttributes: e.TxnTrace.Agent, + UserAttributes: e.TxnTrace.User, + }}) +} + +type UserAgent struct { + User map[string]interface{} + Agent map[string]interface{} +} + +type AttributeExpect struct { + TxnEvent UserAgent + Error UserAgent + TxnTrace UserAgent +} + +func TestAgentAttributes(t *testing.T) { + agentAttributeTestcase(t, nil, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: agent1, + User: user1}, + Error: UserAgent{ + Agent: agent2, + User: user1}, + }) +} + +func TestAttributesDisabled(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.Attributes.Enabled = false + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + Error: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + TxnTrace: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + }) +} + +func TestDefaultResponseCode(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, &http.Request{}) + txn.Write([]byte("hello")) + txn.End() + + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "S", + }, + AgentAttributes: map[string]interface{}{AttributeResponseCode: 200}, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestNoResponseCode(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, &http.Request{}) + txn.End() + + 
app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "S", + }, + AgentAttributes: map[string]interface{}{}, + UserAttributes: map[string]interface{}{}, + }}) +} + +func TestTxnEventAttributesDisabled(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.TransactionEvents.Attributes.Enabled = false + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + Error: UserAgent{ + Agent: agent2, + User: user1}, + TxnTrace: UserAgent{ + Agent: agent2, + User: user1}, + }) +} + +func TestErrorAttributesDisabled(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.ErrorCollector.Attributes.Enabled = false + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: agent1, + User: user1}, + Error: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + TxnTrace: UserAgent{ + Agent: agent2, + User: user1}, + }) +} + +func TestTxnTraceAttributesDisabled(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.TransactionTracer.Attributes.Enabled = false + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: agent1, + User: user1}, + Error: UserAgent{ + Agent: agent2, + User: user1}, + TxnTrace: UserAgent{ + Agent: map[string]interface{}{}, + User: map[string]interface{}{}}, + }) +} + +var ( + allAgentAttributeNames = []string{ + AttributeResponseCode, + AttributeRequestMethod, + AttributeRequestAccept, + AttributeRequestContentType, + AttributeRequestContentLength, + AttributeRequestHost, + AttributeResponseContentType, + AttributeResponseContentLength, + AttributeHostDisplayName, + AttributeRequestUserAgent, + AttributeRequestReferer, + } +) + +func TestAgentAttributesExcluded(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.Attributes.Exclude = allAgentAttributeNames + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: 
map[string]interface{}{}, + User: user1}, + Error: UserAgent{ + Agent: map[string]interface{}{}, + User: user1}, + TxnTrace: UserAgent{ + Agent: map[string]interface{}{}, + User: user1}, + }) +} + +func TestAgentAttributesExcludedFromErrors(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.ErrorCollector.Attributes.Exclude = allAgentAttributeNames + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: agent1, + User: user1}, + Error: UserAgent{ + Agent: map[string]interface{}{}, + User: user1}, + TxnTrace: UserAgent{ + Agent: agent2, + User: user1}, + }) +} + +func TestAgentAttributesExcludedFromTxnEvents(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.TransactionEvents.Attributes.Exclude = allAgentAttributeNames + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: map[string]interface{}{}, + User: user1}, + Error: UserAgent{ + Agent: agent2, + User: user1}, + TxnTrace: UserAgent{ + Agent: agent2, + User: user1}, + }) +} + +func TestAgentAttributesExcludedFromTxnTraces(t *testing.T) { + agentAttributeTestcase(t, func(cfg *Config) { + cfg.TransactionTracer.Attributes.Exclude = allAgentAttributeNames + }, AttributeExpect{ + TxnEvent: UserAgent{ + Agent: agent1, + User: user1}, + Error: UserAgent{ + Agent: agent2, + User: user1}, + TxnTrace: UserAgent{ + Agent: map[string]interface{}{}, + User: user1}, + }) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_benchmark_test.go b/vendor/github.com/newrelic/go-agent/internal_benchmark_test.go new file mode 100644 index 00000000..1f666eec --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_benchmark_test.go @@ -0,0 +1,214 @@ +package newrelic + +import ( + "net/http" + "testing" +) + +// BenchmarkMuxWithoutNewRelic acts as a control against the other mux +// benchmarks. 
+func BenchmarkMuxWithoutNewRelic(b *testing.B) { + mux := http.NewServeMux() + mux.HandleFunc(helloPath, handler) + + w := newCompatibleResponseRecorder() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, helloRequest) + } +} + +// BenchmarkMuxWithNewRelic shows the approximate overhead of instrumenting a +// request. The numbers here are approximate since this is a test app: rather +// than putting the transaction into a channel to be processed by another +// goroutine, the transaction is merged directly into a harvest. +func BenchmarkMuxWithNewRelic(b *testing.B) { + app := testApp(nil, nil, b) + mux := http.NewServeMux() + mux.HandleFunc(WrapHandleFunc(app, helloPath, handler)) + + w := newCompatibleResponseRecorder() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, helloRequest) + } +} + +// BenchmarkMuxWithNewRelic shows the overhead of instrumenting a request when +// the agent is disabled. +func BenchmarkMuxDisabledMode(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + mux := http.NewServeMux() + mux.HandleFunc(WrapHandleFunc(app, helloPath, handler)) + + w := newCompatibleResponseRecorder() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + mux.ServeHTTP(w, helloRequest) + } +} + +// BenchmarkTraceSegmentWithDefer shows the overhead of instrumenting a segment +// using defer. This and BenchmarkTraceSegmentNoDefer are extremely important: +// Timing functions and blocks of code should have minimal cost. 
+func BenchmarkTraceSegmentWithDefer(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + txn := app.StartTransaction("my txn", nil, nil) + fn := func() { + defer StartSegment(txn, "alpha").End() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + fn() + } +} + +func BenchmarkTraceSegmentNoDefer(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + txn := app.StartTransaction("my txn", nil, nil) + fn := func() { + s := StartSegment(txn, "alpha") + s.End() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + fn() + } +} + +func BenchmarkTraceSegmentZeroSegmentThreshold(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + cfg.TransactionTracer.SegmentThreshold = 0 + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + txn := app.StartTransaction("my txn", nil, nil) + fn := func() { + s := StartSegment(txn, "alpha") + s.End() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + fn() + } +} + +func BenchmarkDatastoreSegment(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + txn := app.StartTransaction("my txn", nil, nil) + fn := func(txn Transaction) { + defer DatastoreSegment{ + StartTime: txn.StartSegmentNow(), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "Select", + }.End() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + fn(txn) + } +} + +func BenchmarkExternalSegment(b *testing.B) { + cfg := NewConfig("my app", sampleLicense) + cfg.Enabled = false + app, err := newApp(cfg) + if nil != err { + b.Fatal(err) + } + txn := app.StartTransaction("my txn", nil, nil) + fn := func(txn Transaction) { + defer ExternalSegment{ + StartTime: txn.StartSegmentNow(), 
+ URL: "http://example.com/", + }.End() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + fn(txn) + } +} + +func BenchmarkTxnWithSegment(b *testing.B) { + app := testApp(nil, nil, b) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + txn := app.StartTransaction("my txn", nil, nil) + StartSegment(txn, "myFunction").End() + txn.End() + } +} + +func BenchmarkTxnWithDatastore(b *testing.B) { + app := testApp(nil, nil, b) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + txn := app.StartTransaction("my txn", nil, nil) + DatastoreSegment{ + StartTime: txn.StartSegmentNow(), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "Select", + }.End() + txn.End() + } +} + +func BenchmarkTxnWithExternal(b *testing.B) { + app := testApp(nil, nil, b) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + txn := app.StartTransaction("my txn", nil, nil) + ExternalSegment{ + StartTime: txn.StartSegmentNow(), + URL: "http://example.com", + }.End() + txn.End() + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_config.go b/vendor/github.com/newrelic/go-agent/internal_config.go new file mode 100644 index 00000000..f013781f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_config.go @@ -0,0 +1,153 @@ +package newrelic + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/utilization" +) + +func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig { + cp := c + if nil != c.Include { + cp.Include = make([]string, len(c.Include)) + copy(cp.Include, c.Include) + } + if nil != c.Exclude { + cp.Exclude = make([]string, len(c.Exclude)) + copy(cp.Exclude, c.Exclude) + } + return cp +} + +func copyConfigReferenceFields(cfg Config) Config { + cp := cfg + if nil != cfg.Labels { + cp.Labels = make(map[string]string, 
len(cfg.Labels)) + for key, val := range cfg.Labels { + cp.Labels[key] = val + } + } + if nil != cfg.ErrorCollector.IgnoreStatusCodes { + ignored := make([]int, len(cfg.ErrorCollector.IgnoreStatusCodes)) + copy(ignored, cfg.ErrorCollector.IgnoreStatusCodes) + cp.ErrorCollector.IgnoreStatusCodes = ignored + } + + cp.Attributes = copyDestConfig(cfg.Attributes) + cp.ErrorCollector.Attributes = copyDestConfig(cfg.ErrorCollector.Attributes) + cp.TransactionEvents.Attributes = copyDestConfig(cfg.TransactionEvents.Attributes) + cp.TransactionTracer.Attributes = copyDestConfig(cfg.TransactionTracer.Attributes) + + return cp +} + +const ( + agentLanguage = "go" +) + +func transportSetting(t http.RoundTripper) interface{} { + if nil == t { + return nil + } + return fmt.Sprintf("%T", t) +} + +func loggerSetting(lg Logger) interface{} { + if nil == lg { + return nil + } + if _, ok := lg.(logger.ShimLogger); ok { + return nil + } + return fmt.Sprintf("%T", lg) +} + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Custom-Host-Names.md + hostByteLimit = 255 +) + +type settings Config + +func (s settings) MarshalJSON() ([]byte, error) { + c := Config(s) + transport := c.Transport + c.Transport = nil + logger := c.Logger + c.Logger = nil + + js, err := json.Marshal(c) + if nil != err { + return nil, err + } + fields := make(map[string]interface{}) + err = json.Unmarshal(js, &fields) + if nil != err { + return nil, err + } + // The License field is not simply ignored by adding the `json:"-"` tag + // to it since we want to allow consumers to populate Config from JSON. 
+ delete(fields, `License`) + fields[`Transport`] = transportSetting(transport) + fields[`Logger`] = loggerSetting(logger) + return json.Marshal(fields) +} + +func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e internal.Environment, version string) ([]byte, error) { + return json.Marshal([]interface{}{struct { + Pid int `json:"pid"` + Language string `json:"language"` + Version string `json:"agent_version"` + Host string `json:"host"` + HostDisplayName string `json:"display_host,omitempty"` + Settings interface{} `json:"settings"` + AppName []string `json:"app_name"` + HighSecurity bool `json:"high_security"` + Labels internal.Labels `json:"labels,omitempty"` + Environment internal.Environment `json:"environment"` + Identifier string `json:"identifier"` + Util *utilization.Data `json:"utilization"` + }{ + Pid: pid, + Language: agentLanguage, + Version: version, + Host: internal.StringLengthByteLimit(util.Hostname, hostByteLimit), + HostDisplayName: internal.StringLengthByteLimit(c.HostDisplayName, hostByteLimit), + Settings: (settings)(c), + AppName: strings.Split(c.AppName, ";"), + HighSecurity: c.HighSecurity, + Labels: internal.Labels(c.Labels), + Environment: e, + // This identifier field is provided to avoid: + // https://newrelic.atlassian.net/browse/DSCORE-778 + // + // This identifier is used by the collector to look up the real + // agent. If an identifier isn't provided, the collector will + // create its own based on the first appname, which prevents a + // single daemon from connecting "a;b" and "a;c" at the same + // time. + // + // Providing the identifier below works around this issue and + // allows users more flexibility in using application rollups. 
+ Identifier: c.AppName, + Util: util, + }}) +} + +func configConnectJSON(c Config) ([]byte, error) { + env := internal.NewEnvironment() + util := utilization.Gather(utilization.Config{ + DetectAWS: c.Utilization.DetectAWS, + DetectDocker: c.Utilization.DetectDocker, + LogicalProcessors: c.Utilization.LogicalProcessors, + TotalRAMMIB: c.Utilization.TotalRAMMIB, + BillingHostname: c.Utilization.BillingHostname, + }, c.Logger) + return configConnectJSONInternal(c, os.Getpid(), util, env, Version) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_config_test.go b/vendor/github.com/newrelic/go-agent/internal_config_test.go new file mode 100644 index 00000000..e93b65fb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_config_test.go @@ -0,0 +1,302 @@ +package newrelic + +import ( + "net/http" + "os" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/utilization" +) + +var ( + fixRegex = regexp.MustCompile(`e\+\d+`) +) + +// In Go 1.8 Marshalling of numbers was changed: +// Before: "StackTraceThreshold":5e+08 +// After: "StackTraceThreshold":500000000 +func standardizeNumbers(input string) string { + return fixRegex.ReplaceAllStringFunc(input, func(s string) string { + n, err := strconv.Atoi(s[2:]) + if nil != err { + return s + } + return strings.Repeat("0", n) + }) +} + +func TestCopyConfigReferenceFieldsPresent(t *testing.T) { + cfg := NewConfig("my appname", "0123456789012345678901234567890123456789") + cfg.Labels["zip"] = "zap" + cfg.ErrorCollector.IgnoreStatusCodes = append(cfg.ErrorCollector.IgnoreStatusCodes, 405) + cfg.Attributes.Include = append(cfg.Attributes.Include, "1") + cfg.Attributes.Exclude = append(cfg.Attributes.Exclude, "2") + cfg.TransactionEvents.Attributes.Include = append(cfg.TransactionEvents.Attributes.Include, "3") + cfg.TransactionEvents.Attributes.Exclude = append(cfg.TransactionEvents.Attributes.Exclude, "4") + 
cfg.ErrorCollector.Attributes.Include = append(cfg.ErrorCollector.Attributes.Include, "5") + cfg.ErrorCollector.Attributes.Exclude = append(cfg.ErrorCollector.Attributes.Exclude, "6") + cfg.TransactionTracer.Attributes.Include = append(cfg.TransactionTracer.Attributes.Include, "7") + cfg.TransactionTracer.Attributes.Exclude = append(cfg.TransactionTracer.Attributes.Exclude, "8") + cfg.Transport = &http.Transport{} + cfg.Logger = NewLogger(os.Stdout) + + cp := copyConfigReferenceFields(cfg) + + cfg.Labels["zop"] = "zup" + cfg.ErrorCollector.IgnoreStatusCodes[0] = 201 + cfg.Attributes.Include[0] = "zap" + cfg.Attributes.Exclude[0] = "zap" + cfg.TransactionEvents.Attributes.Include[0] = "zap" + cfg.TransactionEvents.Attributes.Exclude[0] = "zap" + cfg.ErrorCollector.Attributes.Include[0] = "zap" + cfg.ErrorCollector.Attributes.Exclude[0] = "zap" + cfg.TransactionTracer.Attributes.Include[0] = "zap" + cfg.TransactionTracer.Attributes.Exclude[0] = "zap" + + expect := internal.CompactJSONString(`[ + { + "pid":123, + "language":"go", + "agent_version":"0.2.2", + "host":"my-hostname", + "settings":{ + "AppName":"my appname", + "Attributes":{"Enabled":true,"Exclude":["2"],"Include":["1"]}, + "CustomInsightsEvents":{"Enabled":true}, + "DatastoreTracer":{ + "DatabaseNameReporting":{"Enabled":true}, + "InstanceReporting":{"Enabled":true}, + "QueryParameters":{"Enabled":true}, + "SlowQuery":{ + "Enabled":true, + "Threshold":10000000 + } + }, + "Enabled":true, + "ErrorCollector":{ + "Attributes":{"Enabled":true,"Exclude":["6"],"Include":["5"]}, + "CaptureEvents":true, + "Enabled":true, + "IgnoreStatusCodes":[404,405] + }, + "HighSecurity":false, + "HostDisplayName":"", + "Labels":{"zip":"zap"}, + "Logger":"*logger.logFile", + "RuntimeSampler":{"Enabled":true}, + "TransactionEvents":{ + "Attributes":{"Enabled":true,"Exclude":["4"],"Include":["3"]}, + "Enabled":true + }, + "TransactionTracer":{ + "Attributes":{"Enabled":true,"Exclude":["8"],"Include":["7"]}, + "Enabled":true, + 
"SegmentThreshold":2000000, + "StackTraceThreshold":500000000, + "Threshold":{ + "Duration":500000000, + "IsApdexFailing":true + } + }, + "Transport":"*http.Transport", + "UseTLS":true, + "Utilization":{ + "BillingHostname":"", + "DetectAWS":true, + "DetectDocker":true, + "LogicalProcessors":0, + "TotalRAMMIB":0 + } + }, + "app_name":["my appname"], + "high_security":false, + "labels":[{"label_type":"zip","label_value":"zap"}], + "environment":[ + ["runtime.Compiler","comp"], + ["runtime.GOARCH","arch"], + ["runtime.GOOS","goos"], + ["runtime.Version","vers"], + ["runtime.NumCPU",8] + ], + "identifier":"my appname", + "utilization":{ + "metadata_version":2, + "logical_processors":16, + "total_ram_mib":1024, + "hostname":"my-hostname" + } + }]`) + + js, err := configConnectJSONInternal(cp, 123, &utilization.SampleData, internal.SampleEnvironment, "0.2.2") + if nil != err { + t.Fatal(err) + } + out := standardizeNumbers(string(js)) + if out != expect { + t.Error(out) + } +} + +func TestCopyConfigReferenceFieldsAbsent(t *testing.T) { + cfg := NewConfig("my appname", "0123456789012345678901234567890123456789") + cfg.Labels = nil + cfg.ErrorCollector.IgnoreStatusCodes = nil + + cp := copyConfigReferenceFields(cfg) + + expect := internal.CompactJSONString(`[ + { + "pid":123, + "language":"go", + "agent_version":"0.2.2", + "host":"my-hostname", + "settings":{ + "AppName":"my appname", + "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, + "CustomInsightsEvents":{"Enabled":true}, + "DatastoreTracer":{ + "DatabaseNameReporting":{"Enabled":true}, + "InstanceReporting":{"Enabled":true}, + "QueryParameters":{"Enabled":true}, + "SlowQuery":{ + "Enabled":true, + "Threshold":10000000 + } + }, + "Enabled":true, + "ErrorCollector":{ + "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, + "CaptureEvents":true, + "Enabled":true, + "IgnoreStatusCodes":null + }, + "HighSecurity":false, + "HostDisplayName":"", + "Labels":null, + "Logger":null, + 
"RuntimeSampler":{"Enabled":true}, + "TransactionEvents":{ + "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, + "Enabled":true + }, + "TransactionTracer":{ + "Attributes":{"Enabled":true,"Exclude":null,"Include":null}, + "Enabled":true, + "SegmentThreshold":2000000, + "StackTraceThreshold":500000000, + "Threshold":{ + "Duration":500000000, + "IsApdexFailing":true + } + }, + "Transport":null, + "UseTLS":true, + "Utilization":{ + "BillingHostname":"", + "DetectAWS":true, + "DetectDocker":true, + "LogicalProcessors":0, + "TotalRAMMIB":0 + } + }, + "app_name":["my appname"], + "high_security":false, + "environment":[ + ["runtime.Compiler","comp"], + ["runtime.GOARCH","arch"], + ["runtime.GOOS","goos"], + ["runtime.Version","vers"], + ["runtime.NumCPU",8] + ], + "identifier":"my appname", + "utilization":{ + "metadata_version":2, + "logical_processors":16, + "total_ram_mib":1024, + "hostname":"my-hostname" + } + }]`) + + js, err := configConnectJSONInternal(cp, 123, &utilization.SampleData, internal.SampleEnvironment, "0.2.2") + if nil != err { + t.Fatal(err) + } + out := standardizeNumbers(string(js)) + if out != expect { + t.Error(string(js)) + } +} + +func TestValidate(t *testing.T) { + c := Config{ + License: "0123456789012345678901234567890123456789", + AppName: "my app", + Enabled: true, + } + if err := c.Validate(); nil != err { + t.Error(err) + } + c = Config{ + License: "", + AppName: "my app", + Enabled: true, + } + if err := c.Validate(); err != errLicenseLen { + t.Error(err) + } + c = Config{ + License: "", + AppName: "my app", + Enabled: false, + } + if err := c.Validate(); nil != err { + t.Error(err) + } + c = Config{ + License: "wronglength", + AppName: "my app", + Enabled: true, + } + if err := c.Validate(); err != errLicenseLen { + t.Error(err) + } + c = Config{ + License: "0123456789012345678901234567890123456789", + AppName: "too;many;app;names", + Enabled: true, + } + if err := c.Validate(); err != errAppNameLimit { + t.Error(err) + } + c 
= Config{ + License: "0123456789012345678901234567890123456789", + AppName: "", + Enabled: true, + } + if err := c.Validate(); err != errAppNameMissing { + t.Error(err) + } + c = Config{ + License: "0123456789012345678901234567890123456789", + AppName: "my app", + Enabled: true, + HighSecurity: true, + } + if err := c.Validate(); err != errHighSecurityTLS { + t.Error(err) + } + c = Config{ + License: "0123456789012345678901234567890123456789", + AppName: "my app", + Enabled: true, + UseTLS: true, + HighSecurity: true, + } + if err := c.Validate(); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_errors_test.go b/vendor/github.com/newrelic/go-agent/internal_errors_test.go new file mode 100644 index 00000000..6c1e70ba --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_errors_test.go @@ -0,0 +1,300 @@ +package newrelic + +import ( + "runtime" + "testing" + + "github.com/newrelic/go-agent/internal" +) + +type myError struct{} + +func (e myError) Error() string { return "my msg" } + +func TestNoticeErrorBackground(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestNoticeErrorBackground", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestNoticeErrorWeb(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + err := txn.NoticeError(myError{}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: 
"WebTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestNoticeErrorWeb", + URL: "/hello", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, webErrorMetrics) +} + +func TestNoticeErrorTxnEnded(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + txn.End() + err := txn.NoticeError(myError{}) + if err != errAlreadyEnded { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundMetrics) +} + +func TestNoticeErrorHighSecurity(t *testing.T) { + cfgFn := func(cfg *Config) { cfg.HighSecurity = true } + app := testApp(nil, cfgFn, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: highSecurityErrorMsg, + Klass: "newrelic.myError", + Caller: "go-agent.TestNoticeErrorHighSecurity", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": highSecurityErrorMsg, + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestNoticeErrorLocallyDisabled(t *testing.T) { + cfgFn := func(cfg *Config) { cfg.ErrorCollector.Enabled = false } + app := testApp(nil, cfgFn, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if errorsLocallyDisabled != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundMetrics) +} + +func 
TestNoticeErrorRemotelyDisabled(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { reply.CollectErrors = false } + app := testApp(replyfn, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if errorsRemotelyDisabled != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundMetrics) +} + +func TestNoticeErrorNil(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(nil) + if errNilError != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundMetrics) +} + +func TestNoticeErrorEventsLocallyDisabled(t *testing.T) { + cfgFn := func(cfg *Config) { cfg.ErrorCollector.CaptureEvents = false } + app := testApp(nil, cfgFn, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestNoticeErrorEventsLocallyDisabled", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestNoticeErrorEventsRemotelyDisabled(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { reply.CollectErrorEvents = false } + app := testApp(replyfn, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(myError{}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestNoticeErrorEventsRemotelyDisabled", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, 
backgroundErrorMetrics) +} + +type errorWithClass struct{ class string } + +func (e errorWithClass) Error() string { return "my msg" } +func (e errorWithClass) ErrorClass() string { return e.class } + +func TestErrorWithClasser(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(errorWithClass{class: "zap"}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "zap", + Caller: "go-agent.TestErrorWithClasser", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "zap", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestErrorWithClasserReturnsEmpty(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.NoticeError(errorWithClass{class: ""}) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.errorWithClass", + Caller: "go-agent.TestErrorWithClasserReturnsEmpty", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.errorWithClass", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +type withStackTrace struct{ trace []uintptr } + +func makeErrorWithStackTrace() error { + callers := make([]uintptr, 20) + written := runtime.Callers(1, callers) + return withStackTrace{ + trace: callers[0:written], + } +} + +func (e withStackTrace) Error() string { return "my msg" } +func (e withStackTrace) StackTrace() []uintptr { return e.trace } + +func TestErrorWithStackTrace(t *testing.T) { + app := 
testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + e := makeErrorWithStackTrace() + err := txn.NoticeError(e) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.withStackTrace", + Caller: "go-agent.makeErrorWithStackTrace", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.withStackTrace", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestErrorWithStackTraceReturnsNil(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + e := withStackTrace{trace: nil} + err := txn.NoticeError(e) + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.withStackTrace", + Caller: "go-agent.TestErrorWithStackTraceReturnsNil", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.withStackTrace", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_response_writer.go b/vendor/github.com/newrelic/go-agent/internal_response_writer.go new file mode 100644 index 00000000..fd202af2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_response_writer.go @@ -0,0 +1,121 @@ +package newrelic + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + hasC = 1 << iota // CloseNotifier + hasF // Flusher + hasH // Hijacker + hasR // ReaderFrom +) + +type wrap struct{ *txn } +type wrapR struct{ *txn } +type wrapH struct{ *txn } +type wrapHR struct{ *txn } +type wrapF struct{ *txn } +type 
wrapFR struct{ *txn } +type wrapFH struct{ *txn } +type wrapFHR struct{ *txn } +type wrapC struct{ *txn } +type wrapCR struct{ *txn } +type wrapCH struct{ *txn } +type wrapCHR struct{ *txn } +type wrapCF struct{ *txn } +type wrapCFR struct{ *txn } +type wrapCFH struct{ *txn } +type wrapCFHR struct{ *txn } + +func (x wrapC) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCF) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCFR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCFH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCFHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } + +func (x wrapF) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFH) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFHR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCF) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFH) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFHR) Flush() { x.W.(http.Flusher).Flush() } + +func (x wrapH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return 
x.W.(http.Hijacker).Hijack() } +func (x wrapCHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } + +func (x wrapR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } + +func upgradeTxn(txn *txn) Transaction { + x := 0 + if _, ok := txn.W.(http.CloseNotifier); ok { + x |= hasC + } + if _, ok := txn.W.(http.Flusher); ok { + x |= hasF + } + if _, ok := txn.W.(http.Hijacker); ok { + x |= hasH + } + if _, ok := txn.W.(io.ReaderFrom); ok { + x |= hasR + } + + switch x { + default: + // Wrap the transaction even when there are no methods needed to + // ensure consistent error stack trace depth. 
+ return wrap{txn} + case hasR: + return wrapR{txn} + case hasH: + return wrapH{txn} + case hasH | hasR: + return wrapHR{txn} + case hasF: + return wrapF{txn} + case hasF | hasR: + return wrapFR{txn} + case hasF | hasH: + return wrapFH{txn} + case hasF | hasH | hasR: + return wrapFHR{txn} + case hasC: + return wrapC{txn} + case hasC | hasR: + return wrapCR{txn} + case hasC | hasH: + return wrapCH{txn} + case hasC | hasH | hasR: + return wrapCHR{txn} + case hasC | hasF: + return wrapCF{txn} + case hasC | hasF | hasR: + return wrapCFR{txn} + case hasC | hasF | hasH: + return wrapCFH{txn} + case hasC | hasF | hasH | hasR: + return wrapCFHR{txn} + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_slow_queries_test.go b/vendor/github.com/newrelic/go-agent/internal_slow_queries_test.go new file mode 100644 index 00000000..c2636741 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_slow_queries_test.go @@ -0,0 +1,730 @@ +package newrelic + +import ( + "strings" + "testing" + "time" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/crossagent" +) + +func TestSlowQueryBasic(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) +} + +func TestSlowQueryLocallyDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + 
cfg.DatastoreTracer.SlowQuery.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{}) +} + +func TestSlowQueryRemotelyDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + replyfn := func(reply *internal.ConnectReply) { + reply.CollectTraces = false + } + app := testApp(replyfn, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{}) +} + +func TestSlowQueryBelowThreshold(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 1 * time.Hour + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{}) +} + +func TestSlowQueryDatabaseProvided(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + 
DatabaseName: "my_database", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "my_database", + Host: "", + PortPathOrID: "", + }}) +} + +func TestSlowQueryHostProvided(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "db-server-1", + PortPathOrID: "unknown", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: scope, Forced: false, Data: nil}, + {Name: "Datastore/instance/MySQL/db-server-1/unknown", Scope: "", Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryPortProvided(t *testing.T) { + cfgfn := func(cfg *Config) { + 
cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + PortPathOrID: "98021", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "unknown", + PortPathOrID: "98021", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: scope, Forced: false, Data: nil}, + {Name: "Datastore/instance/MySQL/unknown/98021", Scope: "", Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryHostPortProvided(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + PortPathOrID: "98021", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, 
[]internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "db-server-1", + PortPathOrID: "98021", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: scope, Forced: false, Data: nil}, + {Name: "Datastore/instance/MySQL/db-server-1/98021", Scope: "", Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryAggregation(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + }.End() + DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + }.End() + DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastorePostgres, + Collection: "products", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO products (name, price) VALUES ($1, $2)", + }.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 2, + MetricName: 
"Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }, { + Count: 1, + MetricName: "Datastore/statement/Postgres/products/INSERT", + Query: "INSERT INTO products (name, price) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }, + }) +} + +func TestSlowQueryMissingQuery(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "'INSERT' on 'users' using 'MySQL'", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) +} + +func TestSlowQueryMissingEverything(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/operation/Unknown/other", + Query: "'other' on 'unknown' using 'Unknown'", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/Unknown/all", 
Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/Unknown/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/Unknown/other", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/Unknown/other", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryWithQueryParameters(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + params := map[string]interface{}{ + "str": "zap", + "int": 123, + } + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: params, + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + Params: params, + }}) +} + +func TestSlowQueryHighSecurity(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.HighSecurity = true + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + params := map[string]interface{}{ + "str": "zap", + "int": 123, + } + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: params, + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + 
DatabaseName: "", + Host: "", + PortPathOrID: "", + Params: nil, + }}) +} + +func TestSlowQueryInvalidParameters(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + params := map[string]interface{}{ + "str": "zap", + "int": 123, + "invalid_value": struct{}{}, + strings.Repeat("key-too-long", 100): 1, + "long-key": strings.Repeat("A", 300), + } + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: params, + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + Params: map[string]interface{}{ + "str": "zap", + "int": 123, + "long-key": strings.Repeat("A", 255), + }, + }}) +} + +func TestSlowQueryParametersDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.DatastoreTracer.QueryParameters.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + params := map[string]interface{}{ + "str": "zap", + "int": 123, + } + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + QueryParameters: params, + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: 
"/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + Params: nil, + }}) +} + +func TestSlowQueryInstanceDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.DatastoreTracer.InstanceReporting.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "db-server-1", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryInstanceDisabledLocalhost(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.DatastoreTracer.InstanceReporting.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + 
ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + Host: "localhost", + PortPathOrID: "3306", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/users/INSERT", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestSlowQueryDatabaseNameDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.DatastoreTracer.DatabaseNameReporting.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "users", + Operation: "INSERT", + ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)", + DatabaseName: "db-server-1", + } + s1.End() + txn.End() + + app.ExpectSlowQueries(t, []internal.WantSlowQuery{{ + Count: 1, + MetricName: "Datastore/statement/MySQL/users/INSERT", + Query: "INSERT INTO users (name, age) VALUES ($1, $2)", + TxnName: "WebTransaction/Go/hello", + TxnURL: "/hello", + DatabaseName: "", + Host: "", + PortPathOrID: "", + }}) +} + +func TestDatastoreAPICrossAgent(t *testing.T) { + var 
testcases []struct { + TestName string `json:"test_name"` + Input struct { + Parameters struct { + Product string `json:"product"` + Collection string `json:"collection"` + Operation string `json:"operation"` + Host string `json:"host"` + PortPathOrID string `json:"port_path_or_id"` + DatabaseName string `json:"database_name"` + } `json:"parameters"` + IsWeb bool `json:"is_web"` + SystemHostname string `json:"system_hostname"` + Configuration struct { + InstanceEnabled bool `json:"datastore_tracer.instance_reporting.enabled"` + DatabaseEnabled bool `json:"datastore_tracer.database_name_reporting.enabled"` + } + } + Expectation struct { + MetricsScoped []string `json:"metrics_scoped"` + MetricsUnscoped []string `json:"metrics_unscoped"` + Trace struct { + MetricName string `json:"metric_name"` + Host string `json:"host"` + PortPathOrID string `json:"port_path_or_id"` + DatabaseName string `json:"database_name"` + } `json:"transaction_segment_and_slow_query_trace"` + } + } + + err := crossagent.ReadJSON("datastores/datastore_api.json", &testcases) + if err != nil { + t.Fatal(err) + } + + for _, tc := range testcases { + query := "my query" + cfgfn := func(cfg *Config) { + cfg.DatastoreTracer.SlowQuery.Threshold = 0 + cfg.DatastoreTracer.InstanceReporting.Enabled = + tc.Input.Configuration.InstanceEnabled + cfg.DatastoreTracer.DatabaseNameReporting.Enabled = + tc.Input.Configuration.DatabaseEnabled + } + app := testApp(nil, cfgfn, t) + var txn Transaction + var txnURL string + if tc.Input.IsWeb { + txnURL = helloPath + txn = app.StartTransaction("hello", nil, helloRequest) + } else { + txn = app.StartTransaction("hello", nil, nil) + } + DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreProduct(tc.Input.Parameters.Product), + Operation: tc.Input.Parameters.Operation, + Collection: tc.Input.Parameters.Collection, + PortPathOrID: tc.Input.Parameters.PortPathOrID, + Host: tc.Input.Parameters.Host, + DatabaseName: tc.Input.Parameters.DatabaseName, + 
ParameterizedQuery: query, + }.End() + txn.End() + + var metrics []internal.WantMetric + var scope string + if tc.Input.IsWeb { + scope = "WebTransaction/Go/hello" + metrics = append([]internal.WantMetric{}, webMetrics...) + } else { + scope = "OtherTransaction/Go/hello" + metrics = append([]internal.WantMetric{}, backgroundMetrics...) + } + + for _, m := range tc.Expectation.MetricsScoped { + metrics = append(metrics, internal.WantMetric{ + Name: m, Scope: scope, Forced: nil, Data: nil, + }) + } + for _, m := range tc.Expectation.MetricsUnscoped { + metrics = append(metrics, internal.WantMetric{ + Name: m, Scope: "", Forced: nil, Data: nil, + }) + } + + expectTraceHost := tc.Expectation.Trace.Host + if tc.Input.SystemHostname != "" { + for i := range metrics { + metrics[i].Name = strings.Replace(metrics[i].Name, + tc.Input.SystemHostname, + internal.ThisHost, -1) + } + expectTraceHost = strings.Replace(expectTraceHost, + tc.Input.SystemHostname, + internal.ThisHost, -1) + } + + tt := internal.ExtendValidator(t, tc.TestName) + app.ExpectMetrics(tt, metrics) + app.ExpectSlowQueries(tt, []internal.WantSlowQuery{{ + Count: 1, + MetricName: tc.Expectation.Trace.MetricName, + TxnName: scope, + DatabaseName: tc.Expectation.Trace.DatabaseName, + Host: expectTraceHost, + PortPathOrID: tc.Expectation.Trace.PortPathOrID, + TxnURL: txnURL, + Query: query, + }}) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_test.go b/vendor/github.com/newrelic/go-agent/internal_test.go new file mode 100644 index 00000000..6cd353b5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_test.go @@ -0,0 +1,1406 @@ +package newrelic + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/newrelic/go-agent/internal" +) + +var ( + singleCount = []float64{1, 0, 0, 0, 0, 0, 0} + webMetrics = []internal.WantMetric{ + {Name: "WebTransaction/Go/hello", Scope: "", Forced: true, Data: nil}, + {Name: "WebTransaction", Scope: "", Forced: 
true, Data: nil}, + {Name: "HttpDispatcher", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex", Scope: "", Forced: true, Data: nil}, + {Name: "Apdex/Go/hello", Scope: "", Forced: false, Data: nil}, + } + webErrorMetrics = append([]internal.WantMetric{ + {Name: "Errors/all", Scope: "", Forced: true, Data: singleCount}, + {Name: "Errors/allWeb", Scope: "", Forced: true, Data: singleCount}, + {Name: "Errors/WebTransaction/Go/hello", Scope: "", Forced: true, Data: singleCount}, + }, webMetrics...) + backgroundMetrics = []internal.WantMetric{ + {Name: "OtherTransaction/Go/hello", Scope: "", Forced: true, Data: nil}, + {Name: "OtherTransaction/all", Scope: "", Forced: true, Data: nil}, + } + backgroundErrorMetrics = append([]internal.WantMetric{ + {Name: "Errors/all", Scope: "", Forced: true, Data: singleCount}, + {Name: "Errors/allOther", Scope: "", Forced: true, Data: singleCount}, + {Name: "Errors/OtherTransaction/Go/hello", Scope: "", Forced: true, Data: singleCount}, + }, backgroundMetrics...) +) + +// compatibleResponseRecorder wraps ResponseRecorder to ensure consistent behavior +// between different versions of Go. +// +// Unfortunately, there was a behavior change in go1.6: +// +// "The net/http/httptest package's ResponseRecorder now initializes a default +// Content-Type header using the same content-sniffing algorithm as in +// http.Server." 
+type compatibleResponseRecorder struct { + *httptest.ResponseRecorder + wroteHeader bool +} + +func newCompatibleResponseRecorder() *compatibleResponseRecorder { + return &compatibleResponseRecorder{ + ResponseRecorder: httptest.NewRecorder(), + } +} + +func (rw *compatibleResponseRecorder) Header() http.Header { + return rw.ResponseRecorder.Header() +} + +func (rw *compatibleResponseRecorder) Write(buf []byte) (int, error) { + if !rw.wroteHeader { + rw.WriteHeader(200) + rw.wroteHeader = true + } + return rw.ResponseRecorder.Write(buf) +} + +func (rw *compatibleResponseRecorder) WriteHeader(code int) { + rw.wroteHeader = true + rw.ResponseRecorder.WriteHeader(code) +} + +var ( + sampleLicense = "0123456789012345678901234567890123456789" + validParams = map[string]interface{}{"zip": 1, "zap": 2} +) + +var ( + helloResponse = []byte("hello") + helloPath = "/hello" + helloQueryParams = "?secret=hideme" + helloRequest = func() *http.Request { + r, err := http.NewRequest("GET", helloPath+helloQueryParams, nil) + if nil != err { + panic(err) + } + + r.Header.Add(`Accept`, `text/plain`) + r.Header.Add(`Content-Type`, `text/html; charset=utf-8`) + r.Header.Add(`Content-Length`, `753`) + r.Header.Add(`Host`, `my_domain.com`) + r.Header.Add(`User-Agent`, `Mozilla/5.0`) + r.Header.Add(`Referer`, `http://en.wikipedia.org/zip?secret=password`) + + return r + }() +) + +func TestNewApplicationNil(t *testing.T) { + cfg := NewConfig("appname", "wrong length") + cfg.Enabled = false + app, err := NewApplication(cfg) + if nil == err { + t.Error("error expected when license key is short") + } + if nil != app { + t.Error("app expected to be nil when error is returned") + } +} + +func handler(w http.ResponseWriter, req *http.Request) { + w.Write(helloResponse) +} + +func testApp(replyfn func(*internal.ConnectReply), cfgfn func(*Config), t testing.TB) expectApp { + cfg := NewConfig("my app", "0123456789012345678901234567890123456789") + + if nil != cfgfn { + cfgfn(&cfg) + } + + app, err 
:= newTestApp(replyfn, cfg) + if nil != err { + t.Fatal(err) + } + return app +} + +func TestRecordCustomEventSuccess(t *testing.T) { + app := testApp(nil, nil, t) + err := app.RecordCustomEvent("myType", validParams) + if nil != err { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "type": "myType", + "timestamp": internal.MatchAnything, + }, + UserAttributes: validParams, + }}) +} + +func TestRecordCustomEventHighSecurityEnabled(t *testing.T) { + cfgfn := func(cfg *Config) { cfg.HighSecurity = true } + app := testApp(nil, cfgfn, t) + err := app.RecordCustomEvent("myType", validParams) + if err != errHighSecurityEnabled { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) +} + +func TestRecordCustomEventEventsDisabled(t *testing.T) { + cfgfn := func(cfg *Config) { cfg.CustomInsightsEvents.Enabled = false } + app := testApp(nil, cfgfn, t) + err := app.RecordCustomEvent("myType", validParams) + if err != errCustomEventsDisabled { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) +} + +func TestRecordCustomEventBadInput(t *testing.T) { + app := testApp(nil, nil, t) + err := app.RecordCustomEvent("????", validParams) + if err != internal.ErrEventTypeRegex { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) +} + +func TestRecordCustomEventRemoteDisable(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { reply.CollectCustomEvents = false } + app := testApp(replyfn, nil, t) + err := app.RecordCustomEvent("myType", validParams) + if err != errCustomEventsRemoteDisabled { + t.Error(err) + } + app.ExpectCustomEvents(t, []internal.WantEvent{}) +} + +type sampleResponseWriter struct { + code int + written int + header http.Header +} + +func (w *sampleResponseWriter) Header() http.Header { return w.header } +func (w *sampleResponseWriter) Write([]byte) (int, error) { return w.written, nil } +func (w *sampleResponseWriter) WriteHeader(x int) 
{ w.code = x } + +func TestTxnResponseWriter(t *testing.T) { + // NOTE: Eventually when the ResponseWriter is instrumented, this test + // should be expanded to make sure that calling ResponseWriter methods + // after the transaction has ended is not problematic. + w := &sampleResponseWriter{ + header: make(http.Header), + } + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", w, nil) + w.header.Add("zip", "zap") + if out := txn.Header(); out.Get("zip") != "zap" { + t.Error(out.Get("zip")) + } + w.written = 123 + if out, _ := txn.Write(nil); out != 123 { + t.Error(out) + } + if txn.WriteHeader(503); w.code != 503 { + t.Error(w.code) + } +} + +func TestTransactionEventWeb(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + err := txn.End() + if nil != err { + t.Error(err) + } + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "S", + }, + }}) +} + +func TestTransactionEventBackground(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + err := txn.End() + if nil != err { + t.Error(err) + } + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + }, + }}) +} + +func TestTransactionEventLocallyDisabled(t *testing.T) { + cfgFn := func(cfg *Config) { cfg.TransactionEvents.Enabled = false } + app := testApp(nil, cfgFn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + err := txn.End() + if nil != err { + t.Error(err) + } + app.ExpectTxnEvents(t, []internal.WantEvent{}) +} + +func TestTransactionEventRemotelyDisabled(t *testing.T) { + replyfn := func(reply *internal.ConnectReply) { reply.CollectAnalyticsEvents = false } + app := testApp(replyfn, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + err := txn.End() + if nil != err { + t.Error(err) + } + 
app.ExpectTxnEvents(t, []internal.WantEvent{}) +} + +func myErrorHandler(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("my response")) + if txn, ok := w.(Transaction); ok { + txn.NoticeError(myError{}) + } +} + +func TestWrapHandleFunc(t *testing.T) { + app := testApp(nil, nil, t) + mux := http.NewServeMux() + mux.HandleFunc(WrapHandleFunc(app, helloPath, myErrorHandler)) + w := newCompatibleResponseRecorder() + mux.ServeHTTP(w, helloRequest) + + out := w.Body.String() + if "my response" != out { + t.Error(out) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.myErrorHandler", + URL: "/hello", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, webErrorMetrics) +} + +func TestWrapHandle(t *testing.T) { + app := testApp(nil, nil, t) + mux := http.NewServeMux() + mux.Handle(WrapHandle(app, helloPath, http.HandlerFunc(myErrorHandler))) + w := newCompatibleResponseRecorder() + mux.ServeHTTP(w, helloRequest) + + out := w.Body.String() + if "my response" != out { + t.Error(out) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.myErrorHandler", + URL: "/hello", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, webErrorMetrics) +} + +func TestSetName(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("one", nil, nil) + if err := txn.SetName("hello"); nil != err { + t.Error(err) + } + txn.End() + if err := txn.SetName("three"); err != errAlreadyEnded { + 
t.Error(err) + } + + app.ExpectMetrics(t, backgroundMetrics) +} + +func deferEndPanic(txn Transaction, panicMe interface{}) (r interface{}) { + defer func() { + r = recover() + }() + + defer txn.End() + + panic(panicMe) +} + +func TestPanicError(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + + e := myError{} + r := deferEndPanic(txn, e) + if r != e { + t.Error("panic not propagated", r) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: internal.PanicErrorKlass, + Caller: "go-agent.(*txn).End", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": internal.PanicErrorKlass, + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestPanicString(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + + e := "my string" + r := deferEndPanic(txn, e) + if r != e { + t.Error("panic not propagated", r) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my string", + Klass: internal.PanicErrorKlass, + Caller: "go-agent.(*txn).End", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": internal.PanicErrorKlass, + "error.message": "my string", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestPanicInt(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + + e := 22 + r := deferEndPanic(txn, e) + if r != e { + t.Error("panic not propagated", r) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "22", + Klass: internal.PanicErrorKlass, + Caller: "go-agent.(*txn).End", + URL: "", + }}) + 
app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": internal.PanicErrorKlass, + "error.message": "22", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) +} + +func TestPanicNil(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + + r := deferEndPanic(txn, nil) + if nil != r { + t.Error(r) + } + + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, backgroundMetrics) +} + +func TestResponseCodeError(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + + txn.WriteHeader(http.StatusBadRequest) // 400 + txn.WriteHeader(http.StatusUnauthorized) // 401 + + txn.End() + + if http.StatusBadRequest != w.Code { + t.Error(w.Code) + } + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/hello", + Msg: "Bad Request", + Klass: "400", + Caller: "go-agent.(*txn).WriteHeader", + URL: "/hello", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "400", + "error.message": "Bad Request", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, webErrorMetrics) +} + +func TestResponseCode404Filtered(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + + txn.WriteHeader(http.StatusNotFound) + + txn.End() + + if http.StatusNotFound != w.Code { + t.Error(w.Code) + } + + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, webMetrics) +} + +func TestResponseCodeCustomFilter(t *testing.T) { + cfgFn := func(cfg *Config) { + cfg.ErrorCollector.IgnoreStatusCodes = + append(cfg.ErrorCollector.IgnoreStatusCodes, + http.StatusNotFound) 
+ } + app := testApp(nil, cfgFn, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + + txn.WriteHeader(http.StatusNotFound) + + txn.End() + + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, webMetrics) +} + +func TestResponseCodeAfterEnd(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + + txn.End() + txn.WriteHeader(http.StatusBadRequest) + + if http.StatusBadRequest != w.Code { + t.Error(w.Code) + } + + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, webMetrics) +} + +func TestResponseCodeAfterWrite(t *testing.T) { + app := testApp(nil, nil, t) + w := newCompatibleResponseRecorder() + txn := app.StartTransaction("hello", w, helloRequest) + + txn.Write([]byte("zap")) + txn.WriteHeader(http.StatusBadRequest) + + txn.End() + + if out := w.Body.String(); "zap" != out { + t.Error(out) + } + + if http.StatusOK != w.Code { + t.Error(w.Code) + } + + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, webMetrics) +} + +func TestQueueTime(t *testing.T) { + app := testApp(nil, nil, t) + req, err := http.NewRequest("GET", helloPath+helloQueryParams, nil) + req.Header.Add("X-Queue-Start", "1465793282.12345") + if nil != err { + t.Fatal(err) + } + txn := app.StartTransaction("hello", nil, req) + txn.NoticeError(myError{}) + txn.End() + + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "WebTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestQueueTime", + URL: "/hello", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + "queueDuration": 
internal.MatchAnything, + }, + }}) + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "WebFrontend/QueueTime", Scope: "", Forced: true, Data: nil}, + }, webErrorMetrics...)) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + "queueDuration": internal.MatchAnything, + }, + AgentAttributes: nil, + }}) +} + +func TestIgnore(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + txn.NoticeError(myError{}) + err := txn.Ignore() + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectErrors(t, []internal.WantError{}) + app.ExpectErrorEvents(t, []internal.WantEvent{}) + app.ExpectMetrics(t, []internal.WantMetric{}) + app.ExpectTxnEvents(t, []internal.WantEvent{}) +} + +func TestIgnoreAlreadyEnded(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + txn.NoticeError(myError{}) + txn.End() + err := txn.Ignore() + if err != errAlreadyEnded { + t.Error(err) + } + app.ExpectErrors(t, []internal.WantError{{ + TxnName: "OtherTransaction/Go/hello", + Msg: "my msg", + Klass: "newrelic.myError", + Caller: "go-agent.TestIgnoreAlreadyEnded", + URL: "", + }}) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + }, + }}) + app.ExpectMetrics(t, backgroundErrorMetrics) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + }, + }}) +} + +func TestResponseCodeIsError(t *testing.T) { + cfg := NewConfig("my app", "0123456789012345678901234567890123456789") + + if is := responseCodeIsError(&cfg, 200); is { + t.Error(is) + } + if is := responseCodeIsError(&cfg, 400); !is { + t.Error(is) + } + if is := responseCodeIsError(&cfg, 404); is { + t.Error(is) + } + if is := 
responseCodeIsError(&cfg, 503); !is { + t.Error(is) + } +} + +func TestExternalSegmentURL(t *testing.T) { + rawURL := "http://url.com" + req, err := http.NewRequest("GET", "http://request.com/", nil) + if err != nil { + t.Fatal(err) + } + responsereq, err := http.NewRequest("GET", "http://response.com/", nil) + if err != nil { + t.Fatal(err) + } + response := &http.Response{Request: responsereq} + + // empty segment + u, err := externalSegmentURL(ExternalSegment{}) + host := internal.HostFromURL(u) + if nil != err || nil != u || "" != host { + t.Error(u, err, internal.HostFromURL(u)) + } + // segment only containing url + u, err = externalSegmentURL(ExternalSegment{URL: rawURL}) + host = internal.HostFromURL(u) + if nil != err || host != "url.com" { + t.Error(u, err, internal.HostFromURL(u)) + } + // segment only containing request + u, err = externalSegmentURL(ExternalSegment{Request: req}) + host = internal.HostFromURL(u) + if nil != err || "request.com" != host { + t.Error(host) + } + // segment only containing response + u, err = externalSegmentURL(ExternalSegment{Response: response}) + host = internal.HostFromURL(u) + if nil != err || "response.com" != host { + t.Error(host) + } + // segment containing request and response + u, err = externalSegmentURL(ExternalSegment{ + Request: req, + Response: response, + }) + host = internal.HostFromURL(u) + if nil != err || "response.com" != host { + t.Error(host) + } + // segment containing url, request, and response + u, err = externalSegmentURL(ExternalSegment{ + URL: rawURL, + Request: req, + Response: response, + }) + host = internal.HostFromURL(u) + if nil != err || "url.com" != host { + t.Error(err, host) + } +} + +func TestZeroSegmentsSafe(t *testing.T) { + s := Segment{} + s.End() + + StartSegmentNow(nil) + + ds := DatastoreSegment{} + ds.End() + + es := ExternalSegment{} + es.End() + + StartSegment(nil, "").End() + + StartExternalSegment(nil, nil).End() +} + +func TestTraceSegmentDefer(t *testing.T) { + app := 
testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + func() { + defer StartSegment(txn, "segment").End() + }() + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Custom/segment", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/segment", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestTraceSegmentNilErr(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + err := StartSegment(txn, "segment").End() + if nil != err { + t.Error(err) + } + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Custom/segment", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/segment", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestTraceSegmentOutOfOrder(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := StartSegment(txn, "s1") + s2 := StartSegment(txn, "s1") + err1 := s1.End() + err2 := s2.End() + if nil != err1 { + t.Error(err1) + } + if nil == err2 { + t.Error(err2) + } + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Custom/s1", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/s1", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestTraceSegmentEndedBeforeStartSegment(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.End() + s := StartSegment(txn, "segment") + err := s.End() + if err != errAlreadyEnded { + t.Error(err) + } + app.ExpectMetrics(t, webMetrics) +} + +func TestTraceSegmentEndedBeforeEndSegment(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := StartSegment(txn, "segment") + txn.End() + err := s.End() + if err != errAlreadyEnded { + 
t.Error(err) + } + + app.ExpectMetrics(t, webMetrics) +} + +func TestTraceSegmentPanic(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + func() { + defer func() { + recover() + }() + + func() { + defer StartSegment(txn, "f1").End() + + func() { + t := StartSegment(txn, "f2") + + func() { + defer StartSegment(txn, "f3").End() + + func() { + StartSegment(txn, "f4") + + panic(nil) + }() + }() + + t.End() + }() + }() + }() + + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Custom/f1", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/f1", Scope: scope, Forced: false, Data: nil}, + {Name: "Custom/f3", Scope: "", Forced: false, Data: nil}, + {Name: "Custom/f3", Scope: scope, Forced: false, Data: nil}, + }, webMetrics...)) +} + +func TestTraceSegmentNilTxn(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := Segment{Name: "hello"} + err := s.End() + if err != nil { + t.Error(err) + } + txn.End() + app.ExpectMetrics(t, webMetrics) +} + +func TestTraceDatastore(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := DatastoreSegment{} + s.StartTime = txn.StartSegmentNow() + s.Product = DatastoreMySQL + s.Collection = "my_table" + s.Operation = "SELECT" + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/SELECT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/my_table/SELECT", Scope: 
"", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/my_table/SELECT", Scope: scope, Forced: false, Data: nil}, + }, webErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceDatastoreBackground(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + s := DatastoreSegment{ + StartTime: txn.StartSegmentNow(), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", + } + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "OtherTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/MySQL/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/MySQL/SELECT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/my_table/SELECT", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/statement/MySQL/my_table/SELECT", Scope: scope, Forced: false, Data: nil}, + }, backgroundErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) + 
app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceDatastoreMissingProductOperationCollection(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := DatastoreSegment{ + StartTime: txn.StartSegmentNow(), + } + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "Datastore/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/Unknown/all", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/Unknown/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "Datastore/operation/Unknown/other", Scope: "", Forced: false, Data: nil}, + {Name: "Datastore/operation/Unknown/other", Scope: scope, Forced: false, Data: nil}, + }, webErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + "databaseCallCount": 1, + "databaseDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceDatastoreNilTxn(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + var s DatastoreSegment + s.Product = DatastoreMySQL + s.Collection = "my_table" + s.Operation = "SELECT" + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + app.ExpectMetrics(t, webErrorMetrics) + 
app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + }}) +} + +func TestTraceDatastoreTxnEnded(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.NoticeError(myError{}) + s := DatastoreSegment{ + StartTime: txn.StartSegmentNow(), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", + } + txn.End() + err := s.End() + if errAlreadyEnded != err { + t.Error(err) + } + app.ExpectMetrics(t, webErrorMetrics) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + }}) +} + +func TestTraceExternal(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := ExternalSegment{ + StartTime: txn.StartSegmentNow(), + URL: "http://example.com/", + } + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "External/example.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/example.com/all", Scope: scope, Forced: false, Data: nil}, + }, webErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": 
"newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceExternalBadURL(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := ExternalSegment{ + StartTime: txn.StartSegmentNow(), + URL: ":example.com/", + } + err := s.End() + if nil == err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + app.ExpectMetrics(t, webErrorMetrics) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + }}) +} + +func TestTraceExternalBackground(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + s := ExternalSegment{ + StartTime: txn.StartSegmentNow(), + URL: "http://example.com/", + } + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "OtherTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/example.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/example.com/all", Scope: scope, Forced: false, Data: nil}, + }, backgroundErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": 
"newrelic.myError", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceExternalMissingURL(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s := ExternalSegment{ + StartTime: txn.StartSegmentNow(), + } + err := s.End() + if nil != err { + t.Error(err) + } + txn.NoticeError(myError{}) + txn.End() + scope := "WebTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allWeb", Scope: "", Forced: true, Data: nil}, + {Name: "External/unknown/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/unknown/all", Scope: scope, Forced: false, Data: nil}, + }, webErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceExternalNilTxn(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.NoticeError(myError{}) + var s ExternalSegment + err := s.End() + if nil != err { + t.Error(err) + } + txn.End() + app.ExpectMetrics(t, webErrorMetrics) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + 
"error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + }}) +} + +func TestTraceExternalTxnEnded(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.NoticeError(myError{}) + s := ExternalSegment{ + StartTime: txn.StartSegmentNow(), + URL: "http://example.com/", + } + txn.End() + err := s.End() + if err != errAlreadyEnded { + t.Error(err) + } + app.ExpectMetrics(t, webErrorMetrics) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "WebTransaction/Go/hello", + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "WebTransaction/Go/hello", + "nr.apdexPerfZone": "F", + }, + }}) +} + +func TestRoundTripper(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + url := "http://example.com/" + client := &http.Client{} + inner := roundTripperFunc(func(r *http.Request) (*http.Response, error) { + // TODO test that request headers have been set here. 
+ if r.URL.String() != url { + t.Error(r.URL.String()) + } + return nil, errors.New("hello") + }) + client.Transport = NewRoundTripper(txn, inner) + resp, err := client.Get(url) + if resp != nil || err == nil { + t.Error(resp, err.Error()) + } + txn.NoticeError(myError{}) + txn.End() + scope := "OtherTransaction/Go/hello" + app.ExpectMetrics(t, append([]internal.WantMetric{ + {Name: "External/all", Scope: "", Forced: true, Data: nil}, + {Name: "External/allOther", Scope: "", Forced: true, Data: nil}, + {Name: "External/example.com/all", Scope: "", Forced: false, Data: nil}, + {Name: "External/example.com/all", Scope: scope, Forced: false, Data: nil}, + }, backgroundErrorMetrics...)) + app.ExpectErrorEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "error.class": "newrelic.myError", + "error.message": "my msg", + "transactionName": "OtherTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) + app.ExpectTxnEvents(t, []internal.WantEvent{{ + Intrinsics: map[string]interface{}{ + "name": "OtherTransaction/Go/hello", + "externalCallCount": 1, + "externalDuration": internal.MatchAnything, + }, + }}) +} + +func TestTraceBelowThreshold(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{}) +} + +func TestTraceBelowThresholdBackground(t *testing.T) { + app := testApp(nil, nil, t) + txn := app.StartTransaction("hello", nil, nil) + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{}) +} + +func TestTraceNoSegments(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.TransactionTracer.SegmentThreshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: 
"WebTransaction/Go/hello", + CleanURL: "/hello", + NumSegments: 0, + }}) +} + +func TestTraceDisabledLocally(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.TransactionTracer.SegmentThreshold = 0 + cfg.TransactionTracer.Enabled = false + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{}) +} + +func TestTraceDisabledRemotely(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.TransactionTracer.SegmentThreshold = 0 + } + replyfn := func(reply *internal.ConnectReply) { + reply.CollectTraces = false + } + app := testApp(replyfn, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{}) +} + +func TestTraceWithSegments(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.TransactionTracer.SegmentThreshold = 0 + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := StartSegment(txn, "s1") + s1.End() + s2 := ExternalSegment{ + StartTime: StartSegmentNow(txn), + URL: "http://example.com", + } + s2.End() + s3 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", + } + s3.End() + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "WebTransaction/Go/hello", + CleanURL: "/hello", + NumSegments: 3, + }}) +} + +func TestTraceSegmentsBelowThreshold(t *testing.T) { + cfgfn := func(cfg *Config) { + cfg.TransactionTracer.Threshold.IsApdexFailing = false + cfg.TransactionTracer.Threshold.Duration = 0 + cfg.TransactionTracer.SegmentThreshold = 1 * 
time.Hour + } + app := testApp(nil, cfgfn, t) + txn := app.StartTransaction("hello", nil, helloRequest) + s1 := StartSegment(txn, "s1") + s1.End() + s2 := ExternalSegment{ + StartTime: StartSegmentNow(txn), + URL: "http://example.com", + } + s2.End() + s3 := DatastoreSegment{ + StartTime: StartSegmentNow(txn), + Product: DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", + } + s3.End() + txn.End() + app.ExpectTxnTraces(t, []internal.WantTxnTrace{{ + MetricName: "WebTransaction/Go/hello", + CleanURL: "/hello", + NumSegments: 0, + }}) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_txn.go b/vendor/github.com/newrelic/go-agent/internal_txn.go new file mode 100644 index 00000000..35bcb76d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_txn.go @@ -0,0 +1,471 @@ +package newrelic + +import ( + "errors" + "net/http" + "net/url" + "reflect" + "sync" + "time" + + "github.com/newrelic/go-agent/internal" +) + +type txnInput struct { + W http.ResponseWriter + Config Config + Reply *internal.ConnectReply + Consumer dataConsumer + attrConfig *internal.AttributeConfig +} + +type txn struct { + txnInput + // This mutex is required since the consumer may call the public API + // interface functions from different routines. + sync.Mutex + // finished indicates whether or not End() has been called. After + // finished has been set to true, no recording should occur. + finished bool + + Name string // Work in progress name + ignore bool + + // wroteHeader prevents capturing multiple response code errors if the + // user erroneously calls WriteHeader multiple times. 
+ wroteHeader bool + + internal.TxnData +} + +func newTxn(input txnInput, req *http.Request, name string) *txn { + txn := &txn{ + txnInput: input, + } + txn.Start = time.Now() + txn.Name = name + txn.IsWeb = nil != req + txn.Attrs = internal.NewAttributes(input.attrConfig) + if nil != req { + txn.Queuing = internal.QueueDuration(req.Header, txn.Start) + internal.RequestAgentAttributes(txn.Attrs, req) + } + txn.Attrs.Agent.HostDisplayName = txn.Config.HostDisplayName + txn.TxnTrace.Enabled = txn.txnTracesEnabled() + txn.TxnTrace.SegmentThreshold = txn.Config.TransactionTracer.SegmentThreshold + txn.StackTraceThreshold = txn.Config.TransactionTracer.StackTraceThreshold + txn.SlowQueriesEnabled = txn.slowQueriesEnabled() + txn.SlowQueryThreshold = txn.Config.DatastoreTracer.SlowQuery.Threshold + if nil != req && nil != req.URL { + txn.CleanURL = internal.SafeURL(req.URL) + } + + return txn +} + +func (txn *txn) slowQueriesEnabled() bool { + return txn.Config.DatastoreTracer.SlowQuery.Enabled && + txn.Reply.CollectTraces +} + +func (txn *txn) txnTracesEnabled() bool { + return txn.Config.TransactionTracer.Enabled && + txn.Reply.CollectTraces +} + +func (txn *txn) txnEventsEnabled() bool { + return txn.Config.TransactionEvents.Enabled && + txn.Reply.CollectAnalyticsEvents +} + +func (txn *txn) errorEventsEnabled() bool { + return txn.Config.ErrorCollector.CaptureEvents && + txn.Reply.CollectErrorEvents +} + +func (txn *txn) freezeName() { + if txn.ignore || ("" != txn.FinalName) { + return + } + + txn.FinalName = internal.CreateFullTxnName(txn.Name, txn.Reply, txn.IsWeb) + if "" == txn.FinalName { + txn.ignore = true + } +} + +func (txn *txn) getsApdex() bool { + return txn.IsWeb +} + +func (txn *txn) txnTraceThreshold() time.Duration { + if txn.Config.TransactionTracer.Threshold.IsApdexFailing { + return internal.ApdexFailingThreshold(txn.ApdexThreshold) + } + return txn.Config.TransactionTracer.Threshold.Duration +} + +func (txn *txn) shouldSaveTrace() bool { + return 
txn.txnTracesEnabled() && + (txn.Duration >= txn.txnTraceThreshold()) +} + +func (txn *txn) MergeIntoHarvest(h *internal.Harvest) { + internal.CreateTxnMetrics(&txn.TxnData, h.Metrics) + internal.MergeBreakdownMetrics(&txn.TxnData, h.Metrics) + + if txn.txnEventsEnabled() { + // Allocate a new TxnEvent to prevent a reference to the large transaction. + alloc := new(internal.TxnEvent) + *alloc = txn.TxnData.TxnEvent + h.TxnEvents.AddTxnEvent(alloc) + } + + internal.MergeTxnErrors(&h.ErrorTraces, txn.Errors, txn.TxnEvent) + + if txn.errorEventsEnabled() { + for _, e := range txn.Errors { + errEvent := &internal.ErrorEvent{ + ErrorData: *e, + TxnEvent: txn.TxnEvent, + } + // Since the stack trace is not used in error events, remove the reference + // to minimize memory. + errEvent.Stack = nil + h.ErrorEvents.Add(errEvent) + } + } + + if txn.shouldSaveTrace() { + h.TxnTraces.Witness(internal.HarvestTrace{ + TxnEvent: txn.TxnEvent, + Trace: txn.TxnTrace, + }) + } + + if nil != txn.SlowQueries { + h.SlowSQLs.Merge(txn.SlowQueries, txn.FinalName, txn.CleanURL) + } +} + +func responseCodeIsError(cfg *Config, code int) bool { + if code < http.StatusBadRequest { // 400 + return false + } + for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes { + if code == ignoreCode { + return false + } + } + return true +} + +func headersJustWritten(txn *txn, code int) { + if txn.finished { + return + } + if txn.wroteHeader { + return + } + txn.wroteHeader = true + + internal.ResponseHeaderAttributes(txn.Attrs, txn.W.Header()) + internal.ResponseCodeAttribute(txn.Attrs, code) + + if responseCodeIsError(&txn.Config, code) { + e := internal.TxnErrorFromResponseCode(time.Now(), code) + e.Stack = internal.GetStackTrace(1) + txn.noticeErrorInternal(e) + } +} + +func (txn *txn) Header() http.Header { return txn.W.Header() } + +func (txn *txn) Write(b []byte) (int, error) { + n, err := txn.W.Write(b) + + txn.Lock() + defer txn.Unlock() + + headersJustWritten(txn, http.StatusOK) + + 
return n, err +} + +func (txn *txn) WriteHeader(code int) { + txn.W.WriteHeader(code) + + txn.Lock() + defer txn.Unlock() + + headersJustWritten(txn, code) +} + +func (txn *txn) End() error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + txn.finished = true + + r := recover() + if nil != r { + e := internal.TxnErrorFromPanic(time.Now(), r) + e.Stack = internal.GetStackTrace(0) + txn.noticeErrorInternal(e) + } + + txn.Stop = time.Now() + txn.Duration = txn.Stop.Sub(txn.Start) + if children := internal.TracerRootChildren(&txn.TxnData); txn.Duration > children { + txn.Exclusive = txn.Duration - children + } + + txn.freezeName() + + // Assign apdexThreshold regardless of whether or not the transaction + // gets apdex since it may be used to calculate the trace threshold. + txn.ApdexThreshold = internal.CalculateApdexThreshold(txn.Reply, txn.FinalName) + + if txn.getsApdex() { + if txn.HasErrors() { + txn.Zone = internal.ApdexFailing + } else { + txn.Zone = internal.CalculateApdexZone(txn.ApdexThreshold, txn.Duration) + } + } else { + txn.Zone = internal.ApdexNone + } + + if txn.Config.Logger.DebugEnabled() { + txn.Config.Logger.Debug("transaction ended", map[string]interface{}{ + "name": txn.FinalName, + "duration_ms": txn.Duration.Seconds() * 1000.0, + "ignored": txn.ignore, + "run": txn.Reply.RunID, + }) + } + + if !txn.ignore { + txn.Consumer.Consume(txn.Reply.RunID, txn) + } + + // Note that if a consumer uses `panic(nil)`, the panic will not + // propagate. 
+ if nil != r { + panic(r) + } + + return nil +} + +func (txn *txn) AddAttribute(name string, value interface{}) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + return internal.AddUserAttribute(txn.Attrs, name, value, internal.DestAll) +} + +var ( + errorsLocallyDisabled = errors.New("errors locally disabled") + errorsRemotelyDisabled = errors.New("errors remotely disabled") + errNilError = errors.New("nil error") + errAlreadyEnded = errors.New("transaction has already ended") +) + +const ( + highSecurityErrorMsg = "message removed by high security setting" +) + +func (txn *txn) noticeErrorInternal(err internal.ErrorData) error { + if !txn.Config.ErrorCollector.Enabled { + return errorsLocallyDisabled + } + + if !txn.Reply.CollectErrors { + return errorsRemotelyDisabled + } + + if nil == txn.Errors { + txn.Errors = internal.NewTxnErrors(internal.MaxTxnErrors) + } + + if txn.Config.HighSecurity { + err.Msg = highSecurityErrorMsg + } + + txn.Errors.Add(err) + + return nil +} + +func (txn *txn) NoticeError(err error) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + if nil == err { + return errNilError + } + + e := internal.ErrorData{ + When: time.Now(), + Msg: err.Error(), + } + if ec, ok := err.(ErrorClasser); ok { + e.Klass = ec.ErrorClass() + } + if "" == e.Klass { + e.Klass = reflect.TypeOf(err).String() + } + if st, ok := err.(StackTracer); ok { + e.Stack = st.StackTrace() + // Note that if the provided stack trace is excessive in length, + // it will be truncated during JSON creation. 
+ } + if nil == e.Stack { + e.Stack = internal.GetStackTrace(2) + } + + return txn.noticeErrorInternal(e) +} + +func (txn *txn) SetName(name string) error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + + txn.Name = name + return nil +} + +func (txn *txn) Ignore() error { + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + txn.ignore = true + return nil +} + +func (txn *txn) StartSegmentNow() SegmentStartTime { + var s internal.SegmentStartTime + txn.Lock() + if !txn.finished { + s = internal.StartSegment(&txn.TxnData, time.Now()) + } + txn.Unlock() + return SegmentStartTime{ + segment: segment{ + start: s, + txn: txn, + }, + } +} + +type segment struct { + start internal.SegmentStartTime + txn *txn +} + +func endSegment(s Segment) error { + txn := s.StartTime.txn + if nil == txn { + return nil + } + var err error + txn.Lock() + if txn.finished { + err = errAlreadyEnded + } else { + err = internal.EndBasicSegment(&txn.TxnData, s.StartTime.start, time.Now(), s.Name) + } + txn.Unlock() + return err +} + +func endDatastore(s DatastoreSegment) error { + txn := s.StartTime.txn + if nil == txn { + return nil + } + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + if txn.Config.HighSecurity { + s.QueryParameters = nil + } + if !txn.Config.DatastoreTracer.QueryParameters.Enabled { + s.QueryParameters = nil + } + if !txn.Config.DatastoreTracer.DatabaseNameReporting.Enabled { + s.DatabaseName = "" + } + if !txn.Config.DatastoreTracer.InstanceReporting.Enabled { + s.Host = "" + s.PortPathOrID = "" + } + return internal.EndDatastoreSegment(internal.EndDatastoreParams{ + Tracer: &txn.TxnData, + Start: s.StartTime.start, + Now: time.Now(), + Product: string(s.Product), + Collection: s.Collection, + Operation: s.Operation, + ParameterizedQuery: s.ParameterizedQuery, + QueryParameters: s.QueryParameters, + Host: s.Host, + PortPathOrID: s.PortPathOrID, + Database: 
s.DatabaseName, + }) +} + +func externalSegmentURL(s ExternalSegment) (*url.URL, error) { + if "" != s.URL { + return url.Parse(s.URL) + } + r := s.Request + if nil != s.Response && nil != s.Response.Request { + r = s.Response.Request + } + if r != nil { + return r.URL, nil + } + return nil, nil +} + +func endExternal(s ExternalSegment) error { + txn := s.StartTime.txn + if nil == txn { + return nil + } + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return errAlreadyEnded + } + u, err := externalSegmentURL(s) + if nil != err { + return err + } + return internal.EndExternalSegment(&txn.TxnData, s.StartTime.start, time.Now(), u) +} diff --git a/vendor/github.com/newrelic/go-agent/log.go b/vendor/github.com/newrelic/go-agent/log.go new file mode 100644 index 00000000..56b09361 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/log.go @@ -0,0 +1,30 @@ +package newrelic + +import ( + "io" + + "github.com/newrelic/go-agent/internal/logger" +) + +// Logger is the interface that is used for logging in the go-agent. Assign the +// Config.Logger field to the Logger you wish to use. Loggers must be safe for +// use in multiple goroutines. +// +// For an example implementation, see: _integrations/nrlogrus/nrlogrus.go +type Logger interface { + Error(msg string, context map[string]interface{}) + Warn(msg string, context map[string]interface{}) + Info(msg string, context map[string]interface{}) + Debug(msg string, context map[string]interface{}) + DebugEnabled() bool +} + +// NewLogger creates a basic Logger at info level. +func NewLogger(w io.Writer) Logger { + return logger.New(w, false) +} + +// NewDebugLogger creates a basic Logger at debug level. 
+func NewDebugLogger(w io.Writer) Logger { + return logger.New(w, true) +} diff --git a/vendor/github.com/newrelic/go-agent/segments.go b/vendor/github.com/newrelic/go-agent/segments.go new file mode 100644 index 00000000..3d3450e2 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/segments.go @@ -0,0 +1,115 @@ +package newrelic + +import "net/http" + +// SegmentStartTime is created by Transaction.StartSegmentNow and marks the +// beginning of a segment. A segment with a zero-valued SegmentStartTime may +// safely be ended. +type SegmentStartTime struct{ segment } + +// Segment is used to instrument functions, methods, and blocks of code. The +// easiest way use Segment is the StartSegment function. +type Segment struct { + StartTime SegmentStartTime + Name string +} + +// DatastoreSegment is used to instrument calls to databases and object stores. +// Here is an example: +// +// defer newrelic.DatastoreSegment{ +// StartTime: newrelic.StartSegmentNow(txn), +// Product: newrelic.DatastoreMySQL, +// Collection: "my_table", +// Operation: "SELECT", +// }.End() +// +type DatastoreSegment struct { + StartTime SegmentStartTime + // Product is the datastore type. See the constants in datastore.go. + Product DatastoreProduct + // Collection is the table or group. + Collection string + // Operation is the relevant action, e.g. "SELECT" or "GET". + Operation string + // ParameterizedQuery may be set to the query being performed. It must + // not contain any raw parameters, only placeholders. + ParameterizedQuery string + // QueryParameters may be used to provide query parameters. Care should + // be taken to only provide parameters which are not sensitive. + // QueryParameters are ignored in high security mode. + QueryParameters map[string]interface{} + // Host is the name of the server hosting the datastore. + Host string + // PortPathOrID can represent either the port, path, or id of the + // datastore being connected to. 
+ PortPathOrID string + // DatabaseName is name of database where the current query is being + // executed. + DatabaseName string +} + +// ExternalSegment is used to instrument external calls. StartExternalSegment +// is recommended when you have access to an http.Request. +type ExternalSegment struct { + StartTime SegmentStartTime + Request *http.Request + Response *http.Response + // If you do not have access to the request, this URL field should be + // used to indicate the endpoint. NOTE: If non-empty, this field + // is parsed using url.Parse and therefore it MUST include the protocol + // (eg. "http://"). + URL string +} + +// End finishes the segment. +func (s Segment) End() error { return endSegment(s) } + +// End finishes the datastore segment. +func (s DatastoreSegment) End() error { return endDatastore(s) } + +// End finishes the external segment. +func (s ExternalSegment) End() error { return endExternal(s) } + +// StartSegmentNow helps avoid Transaction nil checks. +func StartSegmentNow(txn Transaction) SegmentStartTime { + if nil != txn { + return txn.StartSegmentNow() + } + return SegmentStartTime{} +} + +// StartSegment makes it easy to instrument segments. To time a function, do +// the following: +// +// func timeMe(txn newrelic.Transaction) { +// defer newrelic.StartSegment(txn, "timeMe").End() +// // ... function code here ... +// } +// +// To time a block of code, do the following: +// +// segment := StartSegment(txn, "myBlock") +// // ... code you want to time here ... +// segment.End() +// +func StartSegment(txn Transaction, name string) Segment { + return Segment{ + StartTime: StartSegmentNow(txn), + Name: name, + } +} + +// StartExternalSegment makes it easier to instrument external calls. 
+// +// segment := newrelic.StartExternalSegment(txn, request) +// resp, err := client.Do(request) +// segment.Response = resp +// segment.End() +// +func StartExternalSegment(txn Transaction, request *http.Request) ExternalSegment { + return ExternalSegment{ + StartTime: StartSegmentNow(txn), + Request: request, + } +} diff --git a/vendor/github.com/newrelic/go-agent/transaction.go b/vendor/github.com/newrelic/go-agent/transaction.go new file mode 100644 index 00000000..aef66d84 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/transaction.go @@ -0,0 +1,45 @@ +package newrelic + +import "net/http" + +// Transaction represents a request or a background task. +// Each Transaction should only be used in a single goroutine. +type Transaction interface { + // If StartTransaction is called with a non-nil http.ResponseWriter then + // the Transaction may be used in its place. This allows + // instrumentation of the response code and response headers. + http.ResponseWriter + + // End finishes the current transaction, stopping all further + // instrumentation. Subsequent calls to End will have no effect. + End() error + + // Ignore ensures that this transaction's data will not be recorded. + Ignore() error + + // SetName names the transaction. Transactions will not be grouped + // usefully if too many unique names are used. + SetName(name string) error + + // NoticeError records an error. The first five errors per transaction + // are recorded (this behavior is subject to potential change in the + // future). + NoticeError(err error) error + + // AddAttribute adds a key value pair to the current transaction. This + // information is attached to errors, transaction events, and error + // events. The key must contain fewer than than 255 bytes. The value + // must be a number, string, or boolean. Attribute configuration is + // applied (see config.go). 
+ // + // For more information, see: + // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/collect-custom-attributes + AddAttribute(key string, value interface{}) error + + // StartSegmentNow allows the timing of functions, external calls, and + // datastore calls. The segments of each transaction MUST be used in a + // single goroutine. Consumers are encouraged to use the + // `StartSegmentNow` functions which checks if the Transaction is nil. + // See segments.go + StartSegmentNow() SegmentStartTime +} diff --git a/vendor/github.com/newrelic/go-agent/version.go b/vendor/github.com/newrelic/go-agent/version.go new file mode 100644 index 00000000..7120b6ee --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/version.go @@ -0,0 +1,10 @@ +package newrelic + +const ( + major = "1" + minor = "9" + patch = "0" + + // Version is the full string version of this Go Agent. + Version = major + "." + minor + "." + patch +) diff --git a/vendor/github.com/yadvendar/negroni-newrelic-go-agent/README.md b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/README.md new file mode 100644 index 00000000..21d111e1 --- /dev/null +++ b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/README.md @@ -0,0 +1,57 @@ +# negroni-newrelic-go-agent +[New Relic Go Agent](https://github.com/newrelic/go-agent) middleware for [negroni](https://github.com/codegangsta/negroni) + +[New Relic](https://newrelic.com) has recently released a Go Agent in their APM module. Its currently in beta and in order to get started you can request for a beta token by filling [the beta agreement form](http://goo.gl/forms/Rcv1b10Qvt1ENLlr1). + +If you have microservice architecture using negroni and are looking for a middleware to attach new relic go agent to your existing stack of middlewares, then you can use [negroni-newrelic-go-agent](https://github.com/yadvendar/negroni-newrelic-go-agent) to achieve the same. 
+ +Usage +----- + +```go +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/codegangsta/negroni" + newrelic "github.com/yadvendar/negroni-newrelic-go-agent" +) + +func main() { + r := http.NewServeMux() + r.HandleFunc(`/`, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "success!\n") + }) + + n := negroni.New() + config := newrelic.NewConfig("APP_SERVER_NAME", "NEWRELIC_LICENSE_KEY") + config.BetaToken = "BETA_TOKEN" // this is valid only till go-agent is in beta + config.Enabled = true + newRelicMiddleware, err := newrelic.New(config) + if err != nil { + fmt.Println("Unable to initialize newrelic. Error=" + err.Error()) + os.Exit(0) + } + n.Use(newRelicMiddleware) + n.UseHandler(r) + + n.Run(":3000") +} + +``` + +See a running [example](https://github.com/yadvendar/negroni-newrelic-go-agent/blob/master/example/example.go). + +Credits +------- + +[New Relic Go Agent](https://github.com/newrelic/go-agent) + +License +------- + +See [LICENSE.txt](https://github.com/newrelic/go-agent/blob/master/LICENSE.txt) diff --git a/vendor/github.com/yadvendar/negroni-newrelic-go-agent/example/example.go b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/example/example.go new file mode 100644 index 00000000..78a9edac --- /dev/null +++ b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/example/example.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/codegangsta/negroni" + newrelic "github.com/yadvendar/negroni-newrelic-go-agent" +) + +func main() { + r := http.NewServeMux() + r.HandleFunc(`/`, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "success!\n") + }) + + n := negroni.New() + config := newrelic.NewConfig("APP_SERVER_NAME", "NEWRELIC_LICENSE_KEY") + config.BetaToken = "BETA_TOKEN" // this is valid only till go-agent is in beta + config.Enabled = true + newRelicMiddleware, err := newrelic.New(config) + if err != 
nil { + fmt.Println("Unable to initialize newrelic. Error=" + err.Error()) + os.Exit(0) + } + n.Use(newRelicMiddleware) + n.UseHandler(r) + + n.Run(":3000") +} diff --git a/vendor/github.com/yadvendar/negroni-newrelic-go-agent/newrelic.go b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/newrelic.go new file mode 100644 index 00000000..8d2b2e10 --- /dev/null +++ b/vendor/github.com/yadvendar/negroni-newrelic-go-agent/newrelic.go @@ -0,0 +1,32 @@ +package negroninewrelic + +import ( + "net/http" + + "github.com/newrelic/go-agent" +) + +type Newrelic struct { + Application *newrelic.Application + Transaction *newrelic.Transaction +} + +func NewConfig(applicationName string, licenseKey string) newrelic.Config { + return newrelic.NewConfig(applicationName, licenseKey) +} +func New(config newrelic.Config) (*Newrelic, error) { + app, err := newrelic.NewApplication(config) + return &Newrelic{Application: &app}, err +} + +func (n *Newrelic) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + + txn := ((*n.Application).StartTransaction(r.URL.Path, rw, r)).(newrelic.Transaction) + n.Transaction = &txn + defer (*n.Transaction).End() + + // Use if required + // (*n.Transaction).AddAttribute("query", r.URL.RawQuery) + + next(rw, r) +}