From 01363e3cdba7f726996d28e2acbab9e0b3807139 Mon Sep 17 00:00:00 2001 From: Tudor Golubenco Date: Thu, 13 Aug 2015 11:10:32 +0200 Subject: [PATCH 1/2] Vendor the deps with godeps --- Godeps/Godeps.json | 54 + Godeps/Readme | 5 + Godeps/_workspace/.gitignore | 2 + .../src/github.com/elastic/gosigar/.gitignore | 1 + .../github.com/elastic/gosigar/.travis.yml | 8 + .../src/github.com/elastic/gosigar/LICENSE | 201 ++ .../src/github.com/elastic/gosigar/NOTICE | 9 + .../src/github.com/elastic/gosigar/README.md | 22 + .../github.com/elastic/gosigar/Vagrantfile | 25 + .../elastic/gosigar/concrete_sigar.go | 69 + .../elastic/gosigar/concrete_sigar_test.go | 85 + .../github.com/elastic/gosigar/examples/df.go | 39 + .../elastic/gosigar/examples/free.go | 33 + .../github.com/elastic/gosigar/examples/ps.go | 37 + .../elastic/gosigar/examples/uptime.go | 27 + .../elastic/gosigar/fakes/fake_sigar.go | 72 + .../elastic/gosigar/psnotify/README.md | 50 + .../elastic/gosigar/psnotify/psnotify.go | 136 + .../elastic/gosigar/psnotify/psnotify_bsd.go | 93 + .../gosigar/psnotify/psnotify_linux.go | 253 ++ .../elastic/gosigar/psnotify/psnotify_test.go | 283 ++ .../elastic/gosigar/sigar_darwin.go | 467 +++ .../elastic/gosigar/sigar_format.go | 126 + .../elastic/gosigar/sigar_interface.go | 141 + .../elastic/gosigar/sigar_interface_test.go | 135 + .../github.com/elastic/gosigar/sigar_linux.go | 386 +++ .../elastic/gosigar/sigar_linux_test.go | 225 ++ .../elastic/gosigar/sigar_suite_test.go | 13 + .../github.com/elastic/gosigar/sigar_unix.go | 26 + .../github.com/elastic/gosigar/sigar_util.go | 22 + .../elastic/gosigar/sigar_windows.go | 160 + .../elastic/gosigar/sigar_windows_test.go | 32 + .../elastic/libbeat/cfgfile/cfgfile.go | 34 + .../elastic/libbeat/common/bytes.go | 48 + .../elastic/libbeat/common/bytes_test.go | 219 ++ .../github.com/elastic/libbeat/common/csv.go | 35 + .../elastic/libbeat/common/csv_test.go | 46 + .../elastic/libbeat/common/datetime.go | 46 + .../elastic/libbeat/common/datetime_test.go | 101 + .../libbeat/common/droppriv/droppriv_unix.go | 41 + .../common/droppriv/droppriv_windows.go | 18 + .../elastic/libbeat/common/endpoint.go | 10 + .../elastic/libbeat/common/geolite.go | 63 + .../elastic/libbeat/common/mapstr.go | 81 + .../elastic/libbeat/common/mapstr_test.go | 187 ++ .../github.com/elastic/libbeat/common/net.go | 50 + .../elastic/libbeat/common/statuses.go | 7 + .../elastic/libbeat/common/tuples.go | 135 + .../elastic/libbeat/common/tuples_test.go | 71 + .../elastic/libbeat/logp/file_rotator.go | 162 + .../elastic/libbeat/logp/file_rotator_test.go | 161 + .../github.com/elastic/libbeat/logp/log.go | 168 + .../github.com/elastic/libbeat/logp/logp.go | 114 + .../elastic/libbeat/logp/syslog_unix.go | 20 + .../elastic/libbeat/logp/syslog_windows.go | 9 + .../libbeat/outputs/elasticsearch/api.go | 336 ++ .../outputs/elasticsearch/api_mock_test.go | 182 ++ .../libbeat/outputs/elasticsearch/api_test.go | 128 + .../libbeat/outputs/elasticsearch/bulkapi.go | 88 + .../elasticsearch/bulkapi_mock_test.go | 187 ++ .../outputs/elasticsearch/bulkapi_test.go | 183 ++ .../outputs/elasticsearch/connection_pool.go | 114 + .../elasticsearch/connection_pool_test.go | 134 + .../libbeat/outputs/elasticsearch/output.go | 271 ++ .../outputs/elasticsearch/output_test.go | 347 +++ .../elastic/libbeat/outputs/fileout/file.go | 72 + .../elastic/libbeat/outputs/outputs.go | 72 + .../elastic/libbeat/outputs/redis/redis.go | 311 ++ .../libbeat/outputs/redis/redis_test.go | 69 + 
.../elastic/libbeat/publisher/publish.go | 295 ++ .../elastic/libbeat/service/service.go | 93 + .../elastic/libbeat/service/service_unix.go | 7 + .../libbeat/service/service_windows.go | 48 + .../garyburd/redigo/internal/commandinfo.go | 54 + .../redigo/internal/commandinfo_test.go | 27 + .../redigo/internal/redistest/testdb.go | 65 + .../github.com/garyburd/redigo/redis/conn.go | 457 +++ .../garyburd/redigo/redis/conn_test.go | 542 ++++ .../github.com/garyburd/redigo/redis/doc.go | 169 + .../github.com/garyburd/redigo/redis/log.go | 117 + .../github.com/garyburd/redigo/redis/pool.go | 389 +++ .../garyburd/redigo/redis/pool_test.go | 674 ++++ .../garyburd/redigo/redis/pubsub.go | 144 + .../garyburd/redigo/redis/pubsub_test.go | 150 + .../github.com/garyburd/redigo/redis/redis.go | 44 + .../github.com/garyburd/redigo/redis/reply.go | 364 +++ .../garyburd/redigo/redis/reply_test.go | 166 + .../github.com/garyburd/redigo/redis/scan.go | 513 ++++ .../garyburd/redigo/redis/scan_test.go | 412 +++ .../garyburd/redigo/redis/script.go | 86 + .../garyburd/redigo/redis/script_test.go | 93 + .../garyburd/redigo/redis/test_test.go | 38 + .../redigo/redis/zpop_example_test.go | 113 + .../nranchev/go-libGeoIP/LICENSE.txt | 24 + .../nranchev/go-libGeoIP/README.textile | 46 + .../nranchev/go-libGeoIP/example/example.go | 39 + .../github.com/nranchev/go-libGeoIP/libgeo.go | 354 +++ .../_workspace/src/gopkg.in/yaml.v2/LICENSE | 188 ++ .../src/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + .../_workspace/src/gopkg.in/yaml.v2/README.md | 128 + .../_workspace/src/gopkg.in/yaml.v2/apic.go | 742 +++++ .../_workspace/src/gopkg.in/yaml.v2/decode.go | 683 +++++ .../src/gopkg.in/yaml.v2/decode_test.go | 966 ++++++ .../src/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ .../_workspace/src/gopkg.in/yaml.v2/encode.go | 306 ++ .../src/gopkg.in/yaml.v2/encode_test.go | 501 +++ .../src/gopkg.in/yaml.v2/parserc.go | 1096 +++++++ .../src/gopkg.in/yaml.v2/readerc.go | 391 +++ .../src/gopkg.in/yaml.v2/resolve.go | 203 ++ .../src/gopkg.in/yaml.v2/scannerc.go | 2710 +++++++++++++++++ .../_workspace/src/gopkg.in/yaml.v2/sorter.go | 104 + .../src/gopkg.in/yaml.v2/suite_test.go | 12 + .../src/gopkg.in/yaml.v2/writerc.go | 89 + .../_workspace/src/gopkg.in/yaml.v2/yaml.go | 346 +++ .../_workspace/src/gopkg.in/yaml.v2/yamlh.go | 716 +++++ .../src/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ 116 files changed, 24150 insertions(+) create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/.gitignore create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/LICENSE create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/NOTICE create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/README.md create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/Vagrantfile create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/examples/df.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/examples/free.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/examples/ps.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/examples/uptime.go create mode 100644 
Godeps/_workspace/src/github.com/elastic/gosigar/fakes/fake_sigar.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/README.md create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_bsd.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_linux.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_darwin.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_format.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_suite_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_unix.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_util.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows.go create mode 100644 Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/cfgfile/cfgfile.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/csv.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/csv_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_unix.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_windows.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/endpoint.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/geolite.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/net.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/statuses.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/log.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/logp.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_unix.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_windows.go create mode 100644 
Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_mock_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_mock_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/fileout/file.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/outputs.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis_test.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/publisher/publish.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/service/service.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/service/service_unix.go create mode 100644 Godeps/_workspace/src/github.com/elastic/libbeat/service/service_windows.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go create mode 100644 Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go create mode 100644 
Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/LICENSE.txt create mode 100644 Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/README.textile create mode 100644 Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/example/example.go create mode 100644 Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/libgeo.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go create mode 100644 Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 00000000000..70b554c8f34 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,54 @@ +{ + "ImportPath": "github.com/elastic/topbeat", + "GoVersion": "go1.4.2", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/elastic/gosigar", + "Comment": "scotty_09012012-30-gf3eae4a", + "Rev": "f3eae4adb3b39677a8d543cfdb001a96d5644377" + }, + { + "ImportPath": "github.com/elastic/libbeat/cfgfile", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/elastic/libbeat/common", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/elastic/libbeat/logp", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/elastic/libbeat/outputs", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/elastic/libbeat/publisher", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/elastic/libbeat/service", + "Rev": "30ce409977812c2017dcc9ff956485ba6ebd14bb" + }, + { + "ImportPath": "github.com/garyburd/redigo/internal", + "Rev": "1a6effc8b0a7bb1b21c683ea5645c4e0fb52f302" + }, + { + "ImportPath": "github.com/garyburd/redigo/redis", + "Rev": "1a6effc8b0a7bb1b21c683ea5645c4e0fb52f302" + }, + { + "ImportPath": "github.com/nranchev/go-libGeoIP", + "Rev": "c78e8bd2dd3599feb21fd30886043979e82fe948" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 00000000000..4cdaa53d56d --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. 
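The Godeps.json manifest above pins every dependency to an exact revision; godep then builds against the snapshot under Godeps/_workspace/src by prepending that workspace to GOPATH (for example via `godep go build`). Below is a minimal sketch of what consuming one of the vendored packages looks like — illustrative only, not part of this patch; the import path is unchanged, only its resolution moves into the vendored workspace:

```go
// Illustrative sketch, not part of this patch: importing a vendored
// dependency. When built with `godep go build`, the gosigar import
// below resolves to Godeps/_workspace/src/github.com/elastic/gosigar
// at the revision pinned in Godeps.json.
package main

import (
	"fmt"

	sigar "github.com/elastic/gosigar"
)

func main() {
	mem := sigar.Mem{}
	if err := mem.Get(); err != nil {
		fmt.Println("error reading memory stats:", err)
		return
	}
	fmt.Printf("mem: total=%d used=%d free=%d\n", mem.Total, mem.Used, mem.Free)
}
```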
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 00000000000..f037d684ef2 --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/.gitignore b/Godeps/_workspace/src/github.com/elastic/gosigar/.gitignore new file mode 100644 index 00000000000..8000dd9db47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/.travis.yml b/Godeps/_workspace/src/github.com/elastic/gosigar/.travis.yml new file mode 100644 index 00000000000..2a9c5d0c784 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: + - 'go install github.com/onsi/ginkgo/ginkgo' +script: 'ginkgo -r' diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/LICENSE b/Godeps/_workspace/src/github.com/elastic/gosigar/LICENSE new file mode 100644 index 00000000000..11069edd790 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/NOTICE b/Godeps/_workspace/src/github.com/elastic/gosigar/NOTICE new file mode 100644 index 00000000000..fda553b5c38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/README.md b/Godeps/_workspace/src/github.com/elastic/gosigar/README.md new file mode 100644 index 00000000000..90d51f9b1f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/Vagrantfile b/Godeps/_workspace/src/github.com/elastic/gosigar/Vagrantfile new file mode 100644 index 00000000000..6fd990c1416 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/Vagrantfile @@ -0,0 +1,25 @@ +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" + config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" + install_go = <<-BASH + set -e + +if [ ! -d "/usr/local/go" ]; then + cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz + cd /usr/local + tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz + echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc +fi +export GOPATH=/home/vagrant/go +export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin +/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo +/usr/local/go/bin/go get -u github.com/onsi/gomega; +BASH + config.vm.provision "shell", inline: 'apt-get install -y git-core' + config.vm.provision "shell", inline: install_go +end diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar.go b/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar.go new file mode 100644 index 00000000000..0e80aa4b92f --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. 
+ cpuUsage.Get() + samplesCh <- cpuUsage + + ticker := time.NewTicker(collectionInterval) + + for { + select { + case <-ticker.C: + previousCpuUsage := cpuUsage + + cpuUsage.Get() + + select { + case samplesCh <- cpuUsage.Delta(previousCpuUsage): + default: + // Include default to avoid channel blocking + } + + case <-stopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { + l := LoadAverage{} + err := l.Get() + return l, err +} + +func (c *ConcreteSigar) GetMem() (Mem, error) { + m := Mem{} + err := m.Get() + return m, err +} + +func (c *ConcreteSigar) GetSwap() (Swap, error) { + s := Swap{} + err := s.Get() + return s, err +} + +func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { + f := FileSystemUsage{} + err := f.Get(path) + return f, err +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar_test.go new file mode 100644 index 00000000000..ec51811c45b --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/concrete_sigar_test.go @@ -0,0 +1,85 @@ +package sigar_test + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("ConcreteSigar", func() { + var concreteSigar *sigar.ConcreteSigar + + BeforeEach(func() { + concreteSigar = &sigar.ConcreteSigar{} + }) + + Describe("CollectCpuStats", func() { + It("immediately makes first CPU usage available even though it's not very accurate", func() { + samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + firstValue := <-samplesCh + Expect(firstValue.User).To(BeNumerically(">", 0)) + + stop <- struct{}{} + }) + + It("makes CPU usage delta values available", func() { + samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + firstValue := <-samplesCh + + secondValue := <-samplesCh + Expect(secondValue.User).To(BeNumerically("<", firstValue.User)) + + stop <- struct{}{} + }) + + It("does not block", func() { + _, stop := concreteSigar.CollectCpuStats(10 * time.Millisecond) + + // Sleep long enough for samplesCh to fill at least 2 values + time.Sleep(20 * time.Millisecond) + + stop <- struct{}{} + + // If CollectCpuStats blocks it will never get here + Expect(true).To(BeTrue()) + }) + }) + + It("GetLoadAverage", func() { + avg, err := concreteSigar.GetLoadAverage() + Expect(avg.One).ToNot(BeNil()) + Expect(avg.Five).ToNot(BeNil()) + Expect(avg.Fifteen).ToNot(BeNil()) + + Expect(err).ToNot(HaveOccurred()) + }) + + It("GetMem", func() { + mem, err := concreteSigar.GetMem() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically(">", 0)) + Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total)) + }) + + It("GetSwap", func() { + swap, err := concreteSigar.GetSwap() + Expect(err).ToNot(HaveOccurred()) + Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total)) + }) + + It("GetFileSystemUsage", func() { + fsusage, err := concreteSigar.GetFileSystemUsage("/") + Expect(err).ToNot(HaveOccurred()) + Expect(fsusage.Total).ToNot(BeNil()) + + fsusage, err = concreteSigar.GetFileSystemUsage("T O T A L L Y B O G U S") + Expect(err).To(HaveOccurred()) + Expect(fsusage.Total).To(Equal(uint64(0))) + }) +}) diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/examples/df.go b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/df.go new file mode 100644 index 00000000000..96c92f41d26
--- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/df.go @@ -0,0 +1,39 @@ +// Copyright (c) 2012 VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" +) + +const output_format = "%-15s %4s %4s %5s %4s %-15s\n" + +func formatSize(size uint64) string { + return sigar.FormatSize(size * 1024) +} + +func main() { + fslist := sigar.FileSystemList{} + fslist.Get() + + fmt.Fprintf(os.Stdout, output_format, + "Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on") + + for _, fs := range fslist.List { + dir_name := fs.DirName + + usage := sigar.FileSystemUsage{} + + usage.Get(dir_name) + + fmt.Fprintf(os.Stdout, output_format, + fs.DevName, + formatSize(usage.Total), + formatSize(usage.Used), + formatSize(usage.Avail), + sigar.FormatPercent(usage.UsePercent()), + dir_name) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/examples/free.go b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/free.go new file mode 100644 index 00000000000..9bf9d3db306 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/free.go @@ -0,0 +1,33 @@ +// Copyright (c) 2012 VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" +) + +func format(val uint64) uint64 { + return val / 1024 +} + +func main() { + mem := sigar.Mem{} + swap := sigar.Swap{} + + mem.Get() + swap.Get() + + fmt.Fprintf(os.Stdout, "%18s %10s %10s\n", + "total", "used", "free") + + fmt.Fprintf(os.Stdout, "Mem: %10d %10d %10d\n", + format(mem.Total), format(mem.Used), format(mem.Free)) + + fmt.Fprintf(os.Stdout, "-/+ buffers/cache: %10d %10d\n", + format(mem.ActualUsed), format(mem.ActualFree)) + + fmt.Fprintf(os.Stdout, "Swap: %10d %10d %10d\n", + format(swap.Total), format(swap.Used), format(swap.Free)) +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/examples/ps.go b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/ps.go new file mode 100644 index 00000000000..e3cc2281f0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/ps.go @@ -0,0 +1,37 @@ +// Copyright (c) 2012 VMware, Inc. + +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" +) + +func main() { + pids := sigar.ProcList{} + pids.Get() + + // ps -eo pid,ppid,stime,time,rss,state,comm + fmt.Print(" PID PPID STIME TIME RSS S COMMAND\n") + + for _, pid := range pids.List { + state := sigar.ProcState{} + mem := sigar.ProcMem{} + time := sigar.ProcTime{} + + if err := state.Get(pid); err != nil { + continue + } + if err := mem.Get(pid); err != nil { + continue + } + if err := time.Get(pid); err != nil { + continue + } + + fmt.Printf("%5d %5d %s %s %6d %c %s\n", + pid, state.Ppid, + time.FormatStartTime(), time.FormatTotal(), + mem.Resident/1024, state.State, state.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/examples/uptime.go b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/uptime.go new file mode 100644 index 00000000000..337a9b01a45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/examples/uptime.go @@ -0,0 +1,27 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package main + +import ( + "fmt" + "github.com/cloudfoundry/gosigar" + "os" + "time" +) + +func main() { + concreteSigar := sigar.ConcreteSigar{} + + uptime := sigar.Uptime{} + uptime.Get() + avg, err := concreteSigar.GetLoadAverage() + if err != nil { + fmt.Printf("Failed to get load average") + return + } + + fmt.Fprintf(os.Stdout, " %s up %s load average: %.2f, %.2f, %.2f\n", + time.Now().Format("15:04:05"), + uptime.Format(), + avg.One, avg.Five, avg.Fifteen) +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/fakes/fake_sigar.go b/Godeps/_workspace/src/github.com/elastic/gosigar/fakes/fake_sigar.go new file mode 100644 index 00000000000..6fb77417c46 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/fakes/fake_sigar.go @@ -0,0 +1,72 @@ +package fakes + +import ( + "time" + + sigar "github.com/cloudfoundry/gosigar" +) + +type FakeSigar struct { + LoadAverage sigar.LoadAverage + LoadAverageErr error + + Mem sigar.Mem + MemErr error + + Swap sigar.Swap + SwapErr error + + FileSystemUsage sigar.FileSystemUsage + FileSystemUsageErr error + FileSystemUsagePath string + + CollectCpuStatsCpuCh chan sigar.Cpu + CollectCpuStatsStopCh chan struct{} +} + +func NewFakeSigar() *FakeSigar { + return &FakeSigar{ + CollectCpuStatsCpuCh: make(chan sigar.Cpu, 1), + CollectCpuStatsStopCh: make(chan struct{}), + } +} + +func (f *FakeSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan sigar.Cpu, chan<- struct{}) { + samplesCh := make(chan sigar.Cpu, 1) + stopCh := make(chan struct{}) + + go func() { + for { + select { + case cpuStat := <-f.CollectCpuStatsCpuCh: + select { + case samplesCh <- cpuStat: + default: + // Include default to avoid channel blocking + } + + case <-f.CollectCpuStatsStopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (f *FakeSigar) GetLoadAverage() (sigar.LoadAverage, error) { + return f.LoadAverage, f.LoadAverageErr +} + +func (f *FakeSigar) GetMem() (sigar.Mem, error) { + return f.Mem, f.MemErr +} + +func (f *FakeSigar) GetSwap() (sigar.Swap, error) { + return f.Swap, f.SwapErr +} + +func (f *FakeSigar) GetFileSystemUsage(path string) (sigar.FileSystemUsage, error) { + f.FileSystemUsagePath = path + return f.FileSystemUsage, f.FileSystemUsageErr +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/README.md b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/README.md new file mode 100644 index 00000000000..dd34ebcfbe9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/README.md @@ -0,0 +1,50 @@ +# Process notifications for Go + +## Overview + +The psnotify package captures process events from the kernel via +kqueue on Darwin/BSD and the netlink connector on Linux. + +The psnotify API is similar to the +[fsnotify](https://github.com/howeyc/fsnotify) package. + +Example: +```go + watcher, err := psnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + + // Process events + go func() { + for { + select { + case ev := <-watcher.Fork: + log.Println("fork event:", ev) + case ev := <-watcher.Exec: + log.Println("exec event:", ev) + case ev := <-watcher.Exit: + log.Println("exit event:", ev) + case err := <-watcher.Error: + log.Println("error:", err) + } + } + }() + + err = watcher.Watch(os.Getpid(), psnotify.PROC_EVENT_ALL) + if err != nil { + log.Fatal(err) + } + + /* ... do stuff ... */ + watcher.Close() +``` + +## Supported platforms + +Currently targeting modern flavors of Darwin and Linux. +Should work on BSD, but untested. 
+ +## License + +Apache 2.0 diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify.go b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify.go new file mode 100644 index 00000000000..6a69f4de26c --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify.go @@ -0,0 +1,136 @@ +// Copyright (c) 2012 VMware, Inc. + +package psnotify + +import ( + "errors" + "fmt" +) + +type ProcEventFork struct { + ParentPid int // Pid of the process that called fork() + ChildPid int // Child process pid created by fork() +} + +type ProcEventExec struct { + Pid int // Pid of the process that called exec() +} + +type ProcEventExit struct { + Pid int // Pid of the process that called exit() +} + +type watch struct { + flags uint32 // Saved value of Watch() flags param +} + +type eventListener interface { + close() error // Watch.Close() closes the OS specific listener +} + +type Watcher struct { + listener eventListener // OS specifics (kqueue or netlink) + watches map[int]*watch // Map of watched process ids + Error chan error // Errors are sent on this channel + Fork chan *ProcEventFork // Fork events are sent on this channel + Exec chan *ProcEventExec // Exec events are sent on this channel + Exit chan *ProcEventExit // Exit events are sent on this channel + done chan bool // Used to stop the readEvents() goroutine + isClosed bool // Set to true when Close() is first called +} + +// Initialize event listener and channels +func NewWatcher() (*Watcher, error) { + listener, err := createListener() + + if err != nil { + return nil, err + } + + w := &Watcher{ + listener: listener, + watches: make(map[int]*watch), + Fork: make(chan *ProcEventFork), + Exec: make(chan *ProcEventExec), + Exit: make(chan *ProcEventExit), + Error: make(chan error), + done: make(chan bool, 1), + } + + go w.readEvents() + return w, nil +} + +// Close event channels when done message is received +func (w *Watcher) finish() { + close(w.Fork) + close(w.Exec) + close(w.Exit) + close(w.Error) +} + +// Closes the OS specific event listener, +// removes all watches and closes all event channels. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + for pid := range w.watches { + w.RemoveWatch(pid) + } + + w.done <- true + + w.listener.close() + + return nil +} + +// Add pid to the watched process set. +// The flags param is a bitmask of process events to capture, +// must be one or more of: PROC_EVENT_FORK, PROC_EVENT_EXEC, PROC_EVENT_EXIT +func (w *Watcher) Watch(pid int, flags uint32) error { + if w.isClosed { + return errors.New("psnotify watcher is closed") + } + + watchEntry, found := w.watches[pid] + + if found { + watchEntry.flags |= flags + } else { + if err := w.register(pid, flags); err != nil { + return err + } + w.watches[pid] = &watch{flags: flags} + } + + return nil +} + +// Remove pid from the watched process set. +func (w *Watcher) RemoveWatch(pid int) error { + _, ok := w.watches[pid] + if !ok { + msg := fmt.Sprintf("watch for pid=%d does not exist", pid) + return errors.New(msg) + } + delete(w.watches, pid) + return w.unregister(pid) +} + +// Internal helper to check if there is a message on the "done" channel. +// The "done" message is sent by the Close() method; when received here, +// the Watcher.finish method is called to close all channels and return +// true - in which case the caller should break from the readEvents loop. 
+func (w *Watcher) isDone() bool { + var done bool + select { + case done = <-w.done: + w.finish() + default: + } + return done +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_bsd.go b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_bsd.go new file mode 100644 index 00000000000..e147d763855 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_bsd.go @@ -0,0 +1,93 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd netbsd openbsd + +// Go interface to BSD kqueue process events. +package psnotify + +import ( + "syscall" +) + +const ( + // Flags (from <sys/event.h>) + PROC_EVENT_FORK = syscall.NOTE_FORK // fork() events + PROC_EVENT_EXEC = syscall.NOTE_EXEC // exec() events + PROC_EVENT_EXIT = syscall.NOTE_EXIT // exit() events + + // Watch for all process events + PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT +) + +type kqueueListener struct { + kq int // The syscall.Kqueue() file descriptor + buf [1]syscall.Kevent_t // An event buffer for Add/Remove watch +} + +// Initialize bsd implementation of the eventListener interface +func createListener() (eventListener, error) { + listener := &kqueueListener{} + kq, err := syscall.Kqueue() + listener.kq = kq + return listener, err +} + +// Initialize Kevent_t fields and propagate changelist for the given pid +func (w *Watcher) kevent(pid int, fflags uint32, flags int) error { + listener, _ := w.listener.(*kqueueListener) + event := &listener.buf[0] + + syscall.SetKevent(event, pid, syscall.EVFILT_PROC, flags) + event.Fflags = fflags + + _, err := syscall.Kevent(listener.kq, listener.buf[:], nil, nil) + + return err +} + +// Delete filter for given pid from the queue +func (w *Watcher) unregister(pid int) error { + return w.kevent(pid, 0, syscall.EV_DELETE) +} + +// Add and enable filter for given pid in the queue +func (w *Watcher) register(pid int, flags uint32) error { + return w.kevent(pid, flags, syscall.EV_ADD|syscall.EV_ENABLE) +} + +// Poll the kqueue file descriptor and dispatch to the Event channels +func (w *Watcher) readEvents() { + listener, _ := w.listener.(*kqueueListener) + events := make([]syscall.Kevent_t, 10) + + for { + if w.isDone() { + return + } + + n, err := syscall.Kevent(listener.kq, nil, events, nil) + if err != nil { + w.Error <- err + continue + } + + for _, ev := range events[:n] { + pid := int(ev.Ident) + + switch ev.Fflags { + case syscall.NOTE_FORK: + w.Fork <- &ProcEventFork{ParentPid: pid} + case syscall.NOTE_EXEC: + w.Exec <- &ProcEventExec{Pid: pid} + case syscall.NOTE_EXIT: + w.RemoveWatch(pid) + w.Exit <- &ProcEventExit{Pid: pid} + } + } + } +} + +// Close our kqueue file descriptor; deletes any remaining filters +func (listener *kqueueListener) close() error { + return syscall.Close(listener.kq) +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_linux.go b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_linux.go new file mode 100644 index 00000000000..f9154ef3deb --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_linux.go @@ -0,0 +1,253 @@ +// Copyright (c) 2012 VMware, Inc. + +// Go interface to the Linux netlink process connector. +// See Documentation/connector/connector.txt in the linux kernel source tree.
+package psnotify + +import ( + "bytes" + "encoding/binary" + "os" + "syscall" +) + +const ( + // internal flags (from <linux/connector.h>) + _CN_IDX_PROC = 0x1 + _CN_VAL_PROC = 0x1 + + // internal flags (from <linux/cn_proc.h>) + _PROC_CN_MCAST_LISTEN = 1 + _PROC_CN_MCAST_IGNORE = 2 + + // Flags (from <linux/cn_proc.h>) + PROC_EVENT_FORK = 0x00000001 // fork() events + PROC_EVENT_EXEC = 0x00000002 // exec() events + PROC_EVENT_EXIT = 0x80000000 // exit() events + + // Watch for all process events + PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT +) + +var ( + byteOrder = binary.LittleEndian +) + +// linux/connector.h: struct cb_id +type cbId struct { + Idx uint32 + Val uint32 +} + +// linux/connector.h: struct cb_msg +type cnMsg struct { + Id cbId + Seq uint32 + Ack uint32 + Len uint16 + Flags uint16 +} + +// linux/cn_proc.h: struct proc_event.{what,cpu,timestamp_ns} +type procEventHeader struct { + What uint32 + Cpu uint32 + Timestamp uint64 +} + +// linux/cn_proc.h: struct proc_event.fork +type forkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +// linux/cn_proc.h: struct proc_event.exec +type execProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +// linux/cn_proc.h: struct proc_event.exit +type exitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 +} + +// standard netlink header + connector header +type netlinkProcMessage struct { + Header syscall.NlMsghdr + Data cnMsg +} + +type netlinkListener struct { + addr *syscall.SockaddrNetlink // Netlink socket address + sock int // The syscall.Socket() file descriptor + seq uint32 // struct cn_msg.seq +} + +// Initialize linux implementation of the eventListener interface +func createListener() (eventListener, error) { + listener := &netlinkListener{} + err := listener.bind() + return listener, err +} + +// noop on linux +func (w *Watcher) unregister(pid int) error { + return nil +} + +// noop on linux +func (w *Watcher) register(pid int, flags uint32) error { + return nil +} + +// Read events from the netlink socket +func (w *Watcher) readEvents() { + buf := make([]byte, syscall.Getpagesize()) + + listener, _ := w.listener.(*netlinkListener) + + for { + if w.isDone() { + return + } + + nr, _, err := syscall.Recvfrom(listener.sock, buf, 0) + + if err != nil { + w.Error <- err + continue + } + if nr < syscall.NLMSG_HDRLEN { + w.Error <- syscall.EINVAL + continue + } + + msgs, _ := syscall.ParseNetlinkMessage(buf[:nr]) + + for _, m := range msgs { + if m.Header.Type == syscall.NLMSG_DONE { + w.handleEvent(m.Data) + } + } + } +} + +// Internal helper to check if pid && event is being watched +func (w *Watcher) isWatching(pid int, event uint32) bool { + if watch, ok := w.watches[pid]; ok { + return (watch.flags & event) == event + } + return false +} + +// Dispatch events from the netlink socket to the Event channels.
+// Unlike bsd kqueue, netlink receives events for all pids, +// so we apply filtering based on the watch table via isWatching() +func (w *Watcher) handleEvent(data []byte) { + buf := bytes.NewBuffer(data) + msg := &cnMsg{} + hdr := &procEventHeader{} + + binary.Read(buf, byteOrder, msg) + binary.Read(buf, byteOrder, hdr) + + switch hdr.What { + case PROC_EVENT_FORK: + event := &forkProcEvent{} + binary.Read(buf, byteOrder, event) + ppid := int(event.ParentTgid) + pid := int(event.ChildTgid) + + if w.isWatching(ppid, PROC_EVENT_EXEC) { + // follow forks + watch, _ := w.watches[ppid] + w.Watch(pid, watch.flags) + } + + if w.isWatching(ppid, PROC_EVENT_FORK) { + w.Fork <- &ProcEventFork{ParentPid: ppid, ChildPid: pid} + } + case PROC_EVENT_EXEC: + event := &execProcEvent{} + binary.Read(buf, byteOrder, event) + pid := int(event.ProcessTgid) + + if w.isWatching(pid, PROC_EVENT_EXEC) { + w.Exec <- &ProcEventExec{Pid: pid} + } + case PROC_EVENT_EXIT: + event := &exitProcEvent{} + binary.Read(buf, byteOrder, event) + pid := int(event.ProcessTgid) + + if w.isWatching(pid, PROC_EVENT_EXIT) { + w.RemoveWatch(pid) + w.Exit <- &ProcEventExit{Pid: pid} + } + } +} + +// Bind our netlink socket and +// send a listen control message to the connector driver. +func (listener *netlinkListener) bind() error { + sock, err := syscall.Socket( + syscall.AF_NETLINK, + syscall.SOCK_DGRAM, + syscall.NETLINK_CONNECTOR) + + if err != nil { + return err + } + + listener.sock = sock + listener.addr = &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Groups: _CN_IDX_PROC, + } + + err = syscall.Bind(listener.sock, listener.addr) + + if err != nil { + return err + } + + return listener.send(_PROC_CN_MCAST_LISTEN) +} + +// Send an ignore control message to the connector driver +// and close our netlink socket. +func (listener *netlinkListener) close() error { + err := listener.send(_PROC_CN_MCAST_IGNORE) + syscall.Close(listener.sock) + return err +} + +// Generic method for sending control messages to the connector +// driver; where op is one of PROC_CN_MCAST_{LISTEN,IGNORE} +func (listener *netlinkListener) send(op uint32) error { + listener.seq++ + pr := &netlinkProcMessage{} + plen := binary.Size(pr.Data) + binary.Size(op) + pr.Header.Len = syscall.NLMSG_HDRLEN + uint32(plen) + pr.Header.Type = uint16(syscall.NLMSG_DONE) + pr.Header.Flags = 0 + pr.Header.Seq = listener.seq + pr.Header.Pid = uint32(os.Getpid()) + + pr.Data.Id.Idx = _CN_IDX_PROC + pr.Data.Id.Val = _CN_VAL_PROC + + pr.Data.Len = uint16(binary.Size(op)) + + buf := bytes.NewBuffer(make([]byte, 0, pr.Header.Len)) + binary.Write(buf, byteOrder, pr) + binary.Write(buf, byteOrder, op) + + return syscall.Sendto(listener.sock, buf.Bytes(), 0, listener.addr) +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_test.go new file mode 100644 index 00000000000..28f38a8d7d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/psnotify/psnotify_test.go @@ -0,0 +1,283 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package psnotify + +import ( + "fmt" + "os" + "os/exec" + "runtime" + "syscall" + "testing" + "time" +) + +type anyEvent struct { + exits []int + forks []int + execs []int + errors []error + done chan bool +} + +type testWatcher struct { + t *testing.T + watcher *Watcher + events *anyEvent +} + +// General purpose Watcher wrapper for all tests +func newTestWatcher(t *testing.T) *testWatcher { + watcher, err := NewWatcher() + if err != nil { + t.Fatal(err) + } + + events := &anyEvent{ + done: make(chan bool, 1), + } + + tw := &testWatcher{ + t: t, + watcher: watcher, + events: events, + } + + go func() { + for { + select { + case <-events.done: + return + case ev := <-watcher.Fork: + events.forks = append(events.forks, ev.ParentPid) + case ev := <-watcher.Exec: + events.execs = append(events.execs, ev.Pid) + case ev := <-watcher.Exit: + events.exits = append(events.exits, ev.Pid) + case err := <-watcher.Error: + events.errors = append(events.errors, err) + } + } + }() + + return tw +} + +func (tw *testWatcher) close() { + pause := 100 * time.Millisecond + time.Sleep(pause) + + tw.events.done <- true + + tw.watcher.Close() + + time.Sleep(pause) +} + +func skipTest(t *testing.T) bool { + if runtime.GOOS == "linux" && os.Getuid() != 0 { + fmt.Println("SKIP: test must be run as root on linux") + return true + } + return false +} + +func startSleepCommand(t *testing.T) *exec.Cmd { + cmd := exec.Command("sh", "-c", "sleep 100") + if err := cmd.Start(); err != nil { + t.Error(err) + } + return cmd +} + +func runCommand(t *testing.T, name string) *exec.Cmd { + cmd := exec.Command(name) + if err := cmd.Run(); err != nil { + t.Error(err) + } + return cmd +} + +func expectEvents(t *testing.T, num int, name string, pids []int) bool { + if len(pids) != num { + t.Errorf("Expected %d %s events, got=%v", num, name, pids) + return false + } + return true +} + +func expectEventPid(t *testing.T, name string, expect int, pid int) bool { + if expect != pid { + t.Errorf("Expected %s pid=%d, received=%d", name, expect, pid) + return false + } + return true +} + +func TestWatchFork(t *testing.T) { + if skipTest(t) { + return + } + + pid := os.Getpid() + + tw := newTestWatcher(t) + + // no watches added yet, so this fork event will not be captured + runCommand(t, "date") + + // watch fork events for this process + if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil { + t.Error(err) + } + + // this fork event will be captured, + // the exec and exit events will not be captured + runCommand(t, "cal") + + tw.close() + + if expectEvents(t, 1, "forks", tw.events.forks) { + expectEventPid(t, "fork", pid, tw.events.forks[0]) + } + + expectEvents(t, 0, "execs", tw.events.execs) + expectEvents(t, 0, "exits", tw.events.exits) +} + +func TestWatchExit(t *testing.T) { + if skipTest(t) { + return + } + + tw := newTestWatcher(t) + + cmd := startSleepCommand(t) + + childPid := cmd.Process.Pid + + // watch for exit event of our child process + if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil { + t.Error(err) + } + + // kill our child process, triggers exit event + syscall.Kill(childPid, syscall.SIGTERM) + + cmd.Wait() + + tw.close() + + expectEvents(t, 0, "forks", tw.events.forks) + + expectEvents(t, 0, "execs", tw.events.execs) + + if expectEvents(t, 1, "exits", tw.events.exits) { + expectEventPid(t, "exit", childPid, tw.events.exits[0]) + } +} + +// combined version of TestWatchFork() and TestWatchExit() +func TestWatchForkAndExit(t *testing.T) { + if skipTest(t) { + return + } + + pid := os.Getpid()
+ + tw := newTestWatcher(t) + + if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil { + t.Error(err) + } + + cmd := startSleepCommand(t) + + childPid := cmd.Process.Pid + + if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil { + t.Error(err) + } + + syscall.Kill(childPid, syscall.SIGTERM) + + cmd.Wait() + + tw.close() + + if expectEvents(t, 1, "forks", tw.events.forks) { + expectEventPid(t, "fork", pid, tw.events.forks[0]) + } + + expectEvents(t, 0, "execs", tw.events.execs) + + if expectEvents(t, 1, "exits", tw.events.exits) { + expectEventPid(t, "exit", childPid, tw.events.exits[0]) + } +} + +func TestWatchFollowFork(t *testing.T) { + if skipTest(t) { + return + } + + // Darwin is not able to follow forks, as the kqueue fork event + // does not provide the child pid. + if runtime.GOOS != "linux" { + fmt.Println("SKIP: test follow forks is linux only") + return + } + + pid := os.Getpid() + + tw := newTestWatcher(t) + + // watch for all process events related to this process + if err := tw.watcher.Watch(pid, PROC_EVENT_ALL); err != nil { + t.Error(err) + } + + commands := []string{"date", "cal"} + childPids := make([]int, len(commands)) + + // triggers fork/exec/exit events for each command + for i, name := range commands { + cmd := runCommand(t, name) + childPids[i] = cmd.Process.Pid + } + + // remove watch for this process + tw.watcher.RemoveWatch(pid) + + // run commands again to make sure we don't receive any unwanted events + for _, name := range commands { + runCommand(t, name) + } + + tw.close() + + // run commands again to make sure nothing panics after + // closing the watcher + for _, name := range commands { + runCommand(t, name) + } + + num := len(commands) + if expectEvents(t, num, "forks", tw.events.forks) { + for _, epid := range tw.events.forks { + expectEventPid(t, "fork", pid, epid) + } + } + + if expectEvents(t, num, "execs", tw.events.execs) { + for i, epid := range tw.events.execs { + expectEventPid(t, "exec", childPids[i], epid) + } + } + + if expectEvents(t, num, "exits", tw.events.exits) { + for i, epid := range tw.events.exits { + expectEventPid(t, "exit", childPids[i], epid) + } + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_darwin.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_darwin.go new file mode 100644 index 00000000000..e3a8c4b9c32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_darwin.go @@ -0,0 +1,467 @@ +// Copyright (c) 2012 VMware, Inc. 
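The psnotify tests above double as usage documentation for the package. Condensed into a standalone consumer, the same pattern looks roughly like this; a sketch against the vendored psnotify API shown above (on Linux it must run as root, and the ChildPid field of a fork event is only populated on Linux):

package main

import (
	"fmt"
	"os"

	"github.com/elastic/gosigar/psnotify"
)

func main() {
	watcher, err := psnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	defer watcher.Close()

	// Ask for fork events on our own pid; the other flags exercised by
	// the tests are PROC_EVENT_EXEC, PROC_EVENT_EXIT and PROC_EVENT_ALL.
	if err := watcher.Watch(os.Getpid(), psnotify.PROC_EVENT_FORK); err != nil {
		panic(err)
	}

	select {
	case ev := <-watcher.Fork:
		fmt.Printf("fork: parent=%d child=%d\n", ev.ParentPid, ev.ChildPid)
	case err := <-watcher.Error:
		fmt.Println("watch error:", err)
	}
}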
+ +package sigar + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "syscall" + "time" + "unsafe" +) + +func (self *LoadAverage) Get() error { + avg := []C.double{0, 0, 0} + + C.getloadavg(&avg[0], C.int(len(avg))) + + self.One = float64(avg[0]) + self.Five = float64(avg[1]) + self.Fifteen = float64(avg[2]) + + return nil +} + +func (self *Uptime) Get() error { + tv := syscall.Timeval32{} + + if err := sysctlbyname("kern.boottime", &tv); err != nil { + return err + } + + self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() + + return nil +} + +func (self *Mem) Get() error { + var vmstat C.vm_statistics_data_t + + if err := sysctlbyname("hw.memsize", &self.Total); err != nil { + return err + } + + if err := vm_info(&vmstat); err != nil { + return err + } + + kern := uint64(vmstat.inactive_count) << 12 + self.Free = uint64(vmstat.free_count) << 12 + + self.Used = self.Total - self.Free + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +type xsw_usage struct { + Total, Avail, Used uint64 +} + +func (self *Swap) Get() error { + sw_usage := xsw_usage{} + + if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { + return err + } + + self.Total = sw_usage.Total + self.Used = sw_usage.Used + self.Free = sw_usage.Avail + + return nil +} + +func (self *Cpu) Get() error { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpuload C.host_cpu_load_info_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics error=%d", status) + } + + self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) + self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) + self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) + self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) + + return nil +} + +func (self *CpuList) Get() error { + var count C.mach_msg_type_number_t + var cpuload *C.processor_cpu_load_info_data_t + var ncpu C.natural_t + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + + 
return nil
+}
+
+func (self *FileSystemList) Get() error {
+	num, err := getfsstat(nil, C.MNT_NOWAIT)
+	if num < 0 {
+		return err
+	}
+
+	buf := make([]syscall.Statfs_t, num)
+
+	num, err = getfsstat(buf, C.MNT_NOWAIT)
+	if err != nil {
+		return err
+	}
+
+	fslist := make([]FileSystem, 0, num)
+
+	for i := 0; i < num; i++ {
+		fs := FileSystem{}
+
+		fs.DirName = bytePtrToString(&buf[i].Mntonname[0])
+		fs.DevName = bytePtrToString(&buf[i].Mntfromname[0])
+		fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0])
+
+		fslist = append(fslist, fs)
+	}
+
+	self.List = fslist
+
+	return err
+}
+
+func (self *ProcList) Get() error {
+	n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0)
+	if n <= 0 {
+		return syscall.EINVAL
+	}
+	buf := make([]byte, n)
+	n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n)
+	if n <= 0 {
+		return syscall.ENOMEM
+	}
+
+	var pid int32
+	num := int(n) / binary.Size(pid)
+	list := make([]int, 0, num)
+	bbuf := bytes.NewBuffer(buf)
+
+	for i := 0; i < num; i++ {
+		if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil {
+			return err
+		}
+		if pid == 0 {
+			continue
+		}
+
+		list = append(list, int(pid))
+	}
+
+	self.List = list
+
+	return nil
+}
+
+func (self *ProcState) Get(pid int) error {
+	info := C.struct_proc_taskallinfo{}
+
+	if err := task_info(pid, &info); err != nil {
+		return err
+	}
+
+	self.Name = C.GoString(&info.pbsd.pbi_comm[0])
+
+	switch info.pbsd.pbi_status {
+	case C.SIDL:
+		self.State = RunStateIdle
+	case C.SRUN:
+		self.State = RunStateRun
+	case C.SSLEEP:
+		self.State = RunStateSleep
+	case C.SSTOP:
+		self.State = RunStateStop
+	case C.SZOMB:
+		self.State = RunStateZombie
+	default:
+		self.State = RunStateUnknown
+	}
+
+	self.Ppid = int(info.pbsd.pbi_ppid)
+
+	self.Tty = int(info.pbsd.e_tdev)
+
+	self.Priority = int(info.ptinfo.pti_priority)
+
+	self.Nice = int(info.pbsd.pbi_nice)
+
+	return nil
+}
+
+func (self *ProcMem) Get(pid int) error {
+	info := C.struct_proc_taskallinfo{}
+
+	if err := task_info(pid, &info); err != nil {
+		return err
+	}
+
+	self.Size = uint64(info.ptinfo.pti_virtual_size)
+	self.Resident = uint64(info.ptinfo.pti_resident_size)
+	self.PageFaults = uint64(info.ptinfo.pti_faults)
+
+	return nil
+}
+
+func (self *ProcTime) Get(pid int) error {
+	info := C.struct_proc_taskallinfo{}
+
+	if err := task_info(pid, &info); err != nil {
+		return err
+	}
+
+	self.User =
+		uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond)
+
+	self.Sys =
+		uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond)
+
+	self.Total = self.User + self.Sys
+
+	self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) +
+		(uint64(info.pbsd.pbi_start_tvusec) / 1000)
+
+	return nil
+}
+
+func (self *ProcArgs) Get(pid int) error {
+	var args []string
+
+	argv := func(arg string) {
+		args = append(args, arg)
+	}
+
+	err := kern_procargs(pid, nil, argv, nil)
+
+	self.List = args
+
+	return err
+}
+
+func (self *ProcExe) Get(pid int) error {
+	exe := func(arg string) {
+		self.Name = arg
+	}
+
+	return kern_procargs(pid, exe, nil, nil)
+}
+
+// wrapper around sysctl KERN_PROCARGS2
+// callback params are optional,
+// up to the caller as to which pieces of data they want
+func kern_procargs(pid int,
+	exe func(string),
+	argv func(string),
+	env func(string, string)) error {
+
+	mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}
+	argmax := uintptr(C.ARG_MAX)
+	buf := make([]byte, argmax)
+	err := sysctl(mib, &buf[0], &argmax, nil, 0)
+	if err != nil {
+		return err
+	}
+
+	bbuf := bytes.NewBuffer(buf)
+	
bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. +func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_format.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_format.go new file mode 100644 index 00000000000..d80a64e88f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. 
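The Darwin implementation above is only ever reached through the portable structs defined in sigar_interface.go further below; callers never touch sysctl or the Mach APIs directly. A minimal caller looks like this, as a sketch against the vendored API (struct and method names as defined in this package):

package main

import (
	"fmt"

	sigar "github.com/elastic/gosigar"
)

func main() {
	mem := sigar.Mem{}
	if err := mem.Get(); err != nil {
		panic(err)
	}
	fmt.Printf("mem: total=%d used=%d actual free=%d\n",
		mem.Total, mem.Used, mem.ActualFree)

	uptime := sigar.Uptime{}
	if err := uptime.Get(); err != nil {
		panic(err)
	}
	fmt.Println("up:", uptime.Format()) // e.g. "3 days, 11:19"
}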
+ +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface.go new file mode 100644 index 00000000000..dd72a76b069 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type 
Swap struct { + Total uint64 + Used uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface_test.go new file mode 100644 index 00000000000..fe26abd1b9d --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_interface_test.go @@ -0,0 +1,135 @@ +package sigar_test + +import ( + "os" + "path/filepath" + "runtime" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("Sigar", func() { + var invalidPid = 666666 + + It("cpu", func() { + cpu := Cpu{} + err := cpu.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("load average", func() { + avg := LoadAverage{} + err := avg.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("uptime", func() { + uptime := Uptime{} + err := uptime.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(uptime.Length).To(BeNumerically(">", 0)) + }) + + It("mem", func() { + mem := Mem{} + err := mem.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically(">", 0)) + Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total)) + }) + + It("swap", func() { + swap := Swap{} + err := swap.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total)) + }) + + It("cpu list", func() { + cpulist := CpuList{} + err := cpulist.Get() + Expect(err).ToNot(HaveOccurred()) + + nsigar := len(cpulist.List) + numcpu := runtime.NumCPU() + Expect(nsigar).To(Equal(numcpu)) + }) + + It("file system list", func() { + fslist := FileSystemList{} + err := fslist.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(len(fslist.List)).To(BeNumerically(">", 0)) + }) + + It("file system usage", func() { + fsusage := FileSystemUsage{} + err := fsusage.Get("/") + Expect(err).ToNot(HaveOccurred()) + + err = fsusage.Get("T O T A L L Y B O G U S") + Expect(err).To(HaveOccurred()) + }) + + It("proc list", func() { + pids := ProcList{} + err := pids.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(len(pids.List)).To(BeNumerically(">", 2)) + + err = pids.Get() + Expect(err).ToNot(HaveOccurred()) + }) + + It("proc state", func() { + state := ProcState{} + err := state.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect([]RunState{RunStateRun, RunStateSleep}).To(ContainElement(state.State)) + Expect([]string{"go", "ginkgo"}).To(ContainElement(state.Name)) + + err = state.Get(invalidPid) + 
Expect(err).To(HaveOccurred()) + }) + + It("proc mem", func() { + mem := ProcMem{} + err := mem.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + err = mem.Get(invalidPid) + Expect(err).To(HaveOccurred()) + }) + + It("proc time", func() { + time := ProcTime{} + err := time.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + err = time.Get(invalidPid) + Expect(err).To(HaveOccurred()) + }) + + It("proc args", func() { + args := ProcArgs{} + err := args.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect(len(args.List)).To(BeNumerically(">=", 2)) + }) + + It("proc exe", func() { + exe := ProcExe{} + err := exe.Get(os.Getppid()) + Expect(err).ToNot(HaveOccurred()) + + Expect([]string{"go", "ginkgo"}).To(ContainElement(filepath.Base(exe.Name))) + }) +}) diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux.go new file mode 100644 index 00000000000..68ffb0f9a6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := 
make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s + + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := 
strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux_test.go new file mode 100644 index 00000000000..c5fcdbc9a9c --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_linux_test.go @@ -0,0 +1,225 @@ +package sigar_test + +import ( + "io/ioutil" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("sigarLinux", func() { + var procd string + + BeforeEach(func() { + var err error + procd, err = ioutil.TempDir("", "sigarTests") + Expect(err).ToNot(HaveOccurred()) + sigar.Procd = procd + }) + + AfterEach(func() { + sigar.Procd = "/proc" + }) + + Describe("CPU", func() { + var ( + statFile string + cpu sigar.Cpu + ) + + BeforeEach(func() { + statFile = procd + "/stat" + cpu = sigar.Cpu{} + }) + + Describe("Get", func() { + It("gets CPU usage", func() { + statContents := []byte("cpu 25 1 2 3 4 5 6 7") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + err = cpu.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(cpu.User).To(Equal(uint64(25))) + }) + + It("ignores empty lines", func() { + statContents := []byte("cpu ") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + err = cpu.Get() + Expect(err).ToNot(HaveOccurred()) + Expect(cpu.User).To(Equal(uint64(0))) + }) + }) + + Describe("CollectCpuStats", func() { + It("collects CPU usage over time", func() { + statContents := []byte("cpu 25 1 2 3 4 5 6 7") + err := ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + concreteSigar := &sigar.ConcreteSigar{} + cpuUsages, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond) + + Expect(<-cpuUsages).To(Equal(sigar.Cpu{ + User: uint64(25), + Nice: uint64(1), + Sys: uint64(2), + Idle: uint64(3), + Wait: uint64(4), + Irq: uint64(5), + SoftIrq: uint64(6), + Stolen: uint64(7), + })) + + statContents = []byte("cpu 30 3 7 10 25 55 36 65") + err = ioutil.WriteFile(statFile, statContents, 0644) + Expect(err).ToNot(HaveOccurred()) + + 
Expect(<-cpuUsages).To(Equal(sigar.Cpu{ + User: uint64(5), + Nice: uint64(2), + Sys: uint64(5), + Idle: uint64(7), + Wait: uint64(21), + Irq: uint64(50), + SoftIrq: uint64(30), + Stolen: uint64(58), + })) + + stop <- struct{}{} + }) + }) + }) + + Describe("Mem", func() { + var meminfoFile string + BeforeEach(func() { + meminfoFile = procd + "/meminfo" + + meminfoContents := ` +MemTotal: 374256 kB +MemFree: 274460 kB +Buffers: 9764 kB +Cached: 38648 kB +SwapCached: 0 kB +Active: 33772 kB +Inactive: 31184 kB +Active(anon): 16572 kB +Inactive(anon): 552 kB +Active(file): 17200 kB +Inactive(file): 30632 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 786428 kB +SwapFree: 786428 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 16564 kB +Mapped: 6612 kB +Shmem: 584 kB +Slab: 19092 kB +SReclaimable: 9128 kB +SUnreclaim: 9964 kB +KernelStack: 672 kB +PageTables: 1864 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 973556 kB +Committed_AS: 55880 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 21428 kB +VmallocChunk: 34359713596 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 59328 kB +DirectMap2M: 333824 kB +` + err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns correct memory info", func() { + mem := sigar.Mem{} + err := mem.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(mem.Total).To(BeNumerically("==", 374256*1024)) + Expect(mem.Free).To(BeNumerically("==", 274460*1024)) + }) + }) + + Describe("Swap", func() { + var meminfoFile string + BeforeEach(func() { + meminfoFile = procd + "/meminfo" + + meminfoContents := ` +MemTotal: 374256 kB +MemFree: 274460 kB +Buffers: 9764 kB +Cached: 38648 kB +SwapCached: 0 kB +Active: 33772 kB +Inactive: 31184 kB +Active(anon): 16572 kB +Inactive(anon): 552 kB +Active(file): 17200 kB +Inactive(file): 30632 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 786428 kB +SwapFree: 786428 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 16564 kB +Mapped: 6612 kB +Shmem: 584 kB +Slab: 19092 kB +SReclaimable: 9128 kB +SUnreclaim: 9964 kB +KernelStack: 672 kB +PageTables: 1864 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 973556 kB +Committed_AS: 55880 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 21428 kB +VmallocChunk: 34359713596 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 59328 kB +DirectMap2M: 333824 kB +` + err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444) + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns correct memory info", func() { + swap := sigar.Swap{} + err := swap.Get() + Expect(err).ToNot(HaveOccurred()) + + Expect(swap.Total).To(BeNumerically("==", 786428*1024)) + Expect(swap.Free).To(BeNumerically("==", 786428*1024)) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_suite_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_suite_test.go new file mode 100644 index 00000000000..44287f6319f --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_suite_test.go @@ -0,0 +1,13 @@ +package sigar_test + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "testing" +) + +func TestGosigar(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gosigar Suite") +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_unix.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_unix.go new file mode 100644 index 00000000000..39f18784b4b --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_util.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_util.go new file mode 100644 index 00000000000..a02df9419c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows.go new file mode 100644 index 00000000000..e4cdac5bca1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +// #include +// #include +import "C" + +import ( + "fmt" + "syscall" + "unsafe" +) + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + + procEnumProcesses = modpsapi.NewProc("EnumProcesses") +) + +func init() { +} + +func (self *LoadAverage) Get() error { + return nil +} + +func (self *Uptime) Get() error { + return nil +} + +func (self *Mem) Get() error { + var statex C.MEMORYSTATUSEX + statex.dwLength = C.DWORD(unsafe.Sizeof(statex)) + + succeeded := C.GlobalMemoryStatusEx(&statex) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError)) + } + + self.Total = uint64(statex.ullTotalPhys) + self.Free = uint64(statex.ullAvailPhys) + self.Used = self.Total - self.Free + vtotal := uint64(statex.ullTotalVirtual) + self.ActualFree = uint64(statex.ullAvailVirtual) + self.ActualUsed = vtotal - self.ActualFree + + return nil +} + +func (self *Swap) Get() error { + //return notImplemented() + return nil +} + +func (self *Cpu) Get() error { + + var lpIdleTime, lpKernelTime, lpUserTime C.FILETIME + + succeeded := C.GetSystemTimes(&lpIdleTime, &lpKernelTime, &lpUserTime) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GetSystemTime failed with error: %d", int(lastError)) + } + + LOT := float64(0.0000001) + HIT := (LOT * 4294967296.0) + + idle := ((HIT * float64(lpIdleTime.dwHighDateTime)) + (LOT * float64(lpIdleTime.dwLowDateTime))) + user := ((HIT * float64(lpUserTime.dwHighDateTime)) + (LOT * float64(lpUserTime.dwLowDateTime))) + kernel := ((HIT * float64(lpKernelTime.dwHighDateTime)) + (LOT * float64(lpKernelTime.dwLowDateTime))) + system := (kernel - idle) + + self.Idle = uint64(idle) + self.User = uint64(user) + self.Sys = uint64(system) + return nil +} + +func (self *CpuList) Get() error { + return notImplemented() +} + +func (self *FileSystemList) Get() error { + return notImplemented() +} + +// Retrieves the process identifier for each process object in the system. + +func (self *ProcList) Get() error { + + var enumSize int + var pids [1024]C.DWORD + + // If the function succeeds, the return value is nonzero. 
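+	// EnumProcesses fills pids with up to len(pids) process IDs and writes
+	// the number of BYTES copied into enumSize; dividing by sizeof(DWORD)
+	// below converts that back into a count of PIDs.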
+ ret, _, _ := procEnumProcesses.Call( + uintptr(unsafe.Pointer(&pids[0])), + uintptr(unsafe.Sizeof(pids)), + uintptr(unsafe.Pointer(&enumSize)), + ) + if ret == 0 { + return fmt.Errorf("error %d while reading processes", C.GetLastError()) + } + + results := []int{} + + pids_size := enumSize / int(unsafe.Sizeof(pids[0])) + + for _, pid := range pids[:pids_size] { + results = append(results, int(pid)) + } + + self.List = results + + return nil +} + +func (self *ProcState) Get(pid int) error { + return notImplemented() +} + +func (self *ProcMem) Get(pid int) error { + return notImplemented() +} + +func (self *ProcTime) Get(pid int) error { + return notImplemented() +} + +func (self *ProcArgs) Get(pid int) error { + return notImplemented() +} + +func (self *ProcExe) Get(pid int) error { + return notImplemented() +} + +func (self *FileSystemUsage) Get(path string) error { + var availableBytes C.ULARGE_INTEGER + var totalBytes C.ULARGE_INTEGER + var totalFreeBytes C.ULARGE_INTEGER + + pathChars := C.CString(path) + defer C.free(unsafe.Pointer(pathChars)) + + succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError)) + } + + self.Total = *(*uint64)(unsafe.Pointer(&totalBytes)) + return nil +} + +func notImplemented() error { + panic("Not Implemented") + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows_test.go b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows_test.go new file mode 100644 index 00000000000..868bdaab887 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/gosigar/sigar_windows_test.go @@ -0,0 +1,32 @@ +package sigar_test + +import ( + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + sigar "github.com/cloudfoundry/gosigar" +) + +var _ = Describe("SigarWindows", func() { + Describe("Memory", func() { + It("gets the total memory", func() { + mem := sigar.Mem{} + err := mem.Get() + + Ω(err).ShouldNot(HaveOccurred()) + Ω(mem.Total).Should(BeNumerically(">", 0)) + }) + }) + + Describe("Disk", func() { + It("gets the total disk space", func() { + usage := sigar.FileSystemUsage{} + err := usage.Get(os.TempDir()) + + Ω(err).ShouldNot(HaveOccurred()) + Ω(usage.Total).Should(BeNumerically(">", 0)) + }) + }) +}) diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/cfgfile/cfgfile.go b/Godeps/_workspace/src/github.com/elastic/libbeat/cfgfile/cfgfile.go new file mode 100644 index 00000000000..997ab206c37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/cfgfile/cfgfile.go @@ -0,0 +1,34 @@ +package cfgfile + +import ( + "flag" + "fmt" + "io/ioutil" + + "gopkg.in/yaml.v2" +) + +// Command line flags +var configfile *string +var testConfig *bool + +func CmdLineFlags(flags *flag.FlagSet, name string) { + configfile = flags.String("c", fmt.Sprintf("/etc/%s/%s.yml", name, name), "Configuration file") + testConfig = flags.Bool("test", false, "Test configuration and exit.") +} + +func Read(out interface{}) error { + filecontent, err := ioutil.ReadFile(*configfile) + if err != nil { + return fmt.Errorf("Fail to read %s: %v. Exiting.", *configfile, err) + } + if err = yaml.Unmarshal(filecontent, out); err != nil { + fmt.Errorf("YAML config parsing failed on %s: %v. 
Exiting.", *configfile, err) + } + + return nil +} + +func IsTestConfig() bool { + return *testConfig +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes.go new file mode 100644 index 00000000000..c8be0c5c255 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes.go @@ -0,0 +1,48 @@ +package common + +import ( + "bytes" + "errors" + "fmt" +) + +// Byte order utilities + +func Bytes_Ntohs(b []byte) uint16 { + return uint16(b[0])<<8 | uint16(b[1]) +} + +func Bytes_Ntohl(b []byte) uint32 { + return uint32(b[0])<<24 | uint32(b[1])<<16 | + uint32(b[2])<<8 | uint32(b[3]) +} + +func Bytes_Htohl(b []byte) uint32 { + return uint32(b[3])<<24 | uint32(b[2])<<16 | + uint32(b[1])<<8 | uint32(b[0]) +} + +func Bytes_Ntohll(b []byte) uint64 { + return uint64(b[0])<<56 | uint64(b[1])<<48 | + uint64(b[2])<<40 | uint64(b[3])<<32 | + uint64(b[4])<<24 | uint64(b[5])<<16 | + uint64(b[6])<<8 | uint64(b[7]) +} + +// Ipv4_Ntoa transforms an IP4 address in it's dotted notation +func Ipv4_Ntoa(ip uint32) string { + return fmt.Sprintf("%d.%d.%d.%d", + byte(ip>>24), byte(ip>>16), + byte(ip>>8), byte(ip)) +} + +// ReadString extracts the first null terminated string from +// a slice of bytes. +func ReadString(s []byte) (string, error) { + i := bytes.IndexByte(s, 0) + if i < 0 { + return "", errors.New("No string found") + } + res := string(s[:i]) + return res, nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes_test.go new file mode 100644 index 00000000000..51cdaebc50c --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/bytes_test.go @@ -0,0 +1,219 @@ +package common + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBytes_Ntohs(t *testing.T) { + type io struct { + Input []byte + Output uint16 + } + + tests := []io{ + io{ + Input: []byte{0, 1}, + Output: 1, + }, + io{ + Input: []byte{1, 0}, + Output: 256, + }, + io{ + Input: []byte{1, 2}, + Output: 258, + }, + io{ + Input: []byte{2, 3}, + Output: 515, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, Bytes_Ntohs(test.Input)) + } +} + +func TestBytes_Ntohl(t *testing.T) { + type io struct { + Input []byte + Output uint32 + } + + tests := []io{ + io{ + Input: []byte{0, 0, 0, 1}, + Output: 1, + }, + io{ + Input: []byte{0, 0, 1, 0}, + Output: 256, + }, + io{ + Input: []byte{0, 1, 0, 0}, + Output: 1 << 16, + }, + io{ + Input: []byte{1, 0, 0, 0}, + Output: 1 << 24, + }, + io{ + Input: []byte{1, 0, 15, 0}, + Output: 0x01000f00, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, Bytes_Ntohl(test.Input)) + } +} + +func TestBytes_Htohl(t *testing.T) { + type io struct { + Input []byte + Output uint32 + } + + tests := []io{ + io{ + Input: []byte{0, 0, 0, 1}, + Output: 1 << 24, + }, + io{ + Input: []byte{0, 0, 1, 0}, + Output: 1 << 16, + }, + io{ + Input: []byte{0, 1, 0, 0}, + Output: 256, + }, + io{ + Input: []byte{1, 0, 0, 0}, + Output: 1, + }, + io{ + Input: []byte{1, 0, 15, 0}, + Output: 0x000f0001, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, Bytes_Htohl(test.Input)) + } +} + +func TestBytes_Ntohll(t *testing.T) { + type io struct { + Input []byte + Output uint64 + } + + tests := []io{ + io{ + Input: []byte{0, 0, 0, 0, 0, 0, 0, 1}, + Output: 1, + }, + io{ + Input: []byte{0, 0, 0, 0, 0, 0, 1, 0}, + Output: 256, + }, + io{ + 
Input: []byte{0, 0, 0, 0, 0, 1, 0, 0}, + Output: 1 << 16, + }, + io{ + Input: []byte{0, 0, 0, 0, 1, 0, 0, 0}, + Output: 1 << 24, + }, + io{ + Input: []byte{0, 0, 0, 1, 0, 0, 0, 0}, + Output: 1 << 32, + }, + io{ + Input: []byte{0, 0, 1, 0, 0, 0, 0, 0}, + Output: 1 << 40, + }, + io{ + Input: []byte{0, 1, 0, 0, 0, 0, 0, 0}, + Output: 1 << 48, + }, + io{ + Input: []byte{1, 0, 0, 0, 0, 0, 0, 0}, + Output: 1 << 56, + }, + io{ + Input: []byte{0, 1, 0, 0, 1, 0, 15, 0}, + Output: 0x0001000001000f00, + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, Bytes_Ntohll(test.Input)) + } +} + +func TestIpv4_Ntoa(t *testing.T) { + type io struct { + Input uint32 + Output string + } + + tests := []io{ + io{ + Input: 0x7f000001, + Output: "127.0.0.1", + }, + io{ + Input: 0xc0a80101, + Output: "192.168.1.1", + }, + io{ + Input: 0, + Output: "0.0.0.0", + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, Ipv4_Ntoa(test.Input)) + } +} + +func TestReadString(t *testing.T) { + type io struct { + Input []byte + Output string + Err error + } + + tests := []io{ + io{ + Input: []byte{'a', 'b', 'c', 0, 'd', 'e', 'f'}, + Output: "abc", + Err: nil, + }, + io{ + Input: []byte{0}, + Output: "", + Err: nil, + }, + io{ + Input: []byte{'a', 'b', 'c'}, + Output: "", + Err: errors.New("No string found"), + }, + io{ + Input: []byte{}, + Output: "", + Err: errors.New("No string found"), + }, + } + + for _, test := range tests { + res, err := ReadString(test.Input) + assert.Equal(t, test.Err, err) + assert.Equal(t, test.Output, res) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv.go new file mode 100644 index 00000000000..f55f2404faf --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv.go @@ -0,0 +1,35 @@ +package common + +import ( + "bytes" + "encoding/csv" + "strings" +) + +// Takes a set of fields and rows and returns a string +// representing the CSV representation for the fields and rows. 
+func DumpInCSVFormat(fields []string, rows [][]string) string { + + var buf bytes.Buffer + writer := csv.NewWriter(&buf) + + for i, field := range fields { + fields[i] = strings.Replace(field, "\n", "\\n", -1) + } + if len(fields) > 0 { + writer.Write(fields) + } + + for _, row := range rows { + for i, field := range row { + field = strings.Replace(field, "\n", "\\n", -1) + field = strings.Replace(field, "\r", "\\r", -1) + row[i] = field + } + writer.Write(row) + } + writer.Flush() + + csv := buf.String() + return csv +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv_test.go new file mode 100644 index 00000000000..2a5edcea885 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/csv_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_CSVDump(t *testing.T) { + type io struct { + Fields []string + Rows [][]string + Output string + } + + tests := []io{ + io{ + Fields: []string{"f1", "f2"}, + Rows: [][]string{ + []string{"11", "12"}, + []string{"21", "22"}, + }, + Output: "f1,f2\n11,12\n21,22\n", + }, + io{ + Fields: []string{"f1", "f2"}, + Rows: [][]string{ + []string{"11"}, + []string{"21", "22", "23"}, + }, + Output: "f1,f2\n11\n21,22,23\n", + }, + io{ + Fields: []string{"f\n\n1", "f\n2"}, + Rows: [][]string{ + []string{"11"}, + []string{"2\r\n1", "2\r\n2", "23"}, + }, + Output: "f\\n\\n1,f\\n2\n11\n2\\r\\n1,2\\r\\n2,23\n", + }, + } + + for _, test := range tests { + assert.Equal(t, test.Output, DumpInCSVFormat(test.Fields, test.Rows)) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime.go new file mode 100644 index 00000000000..5f7b8a631e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime.go @@ -0,0 +1,46 @@ +package common + +import ( + "encoding/json" + "errors" + "time" +) + +// Layout to be used in the timestamp marshaling/unmarshaling everywhere. +// The timezone must always be UTC. +const TsLayout = "2006-01-02T15:04:05.000Z" + +type Time time.Time + +// MarshalJSON implements json.Marshaler interface. +// The time is a quoted string in the JsTsLayout format. +func (t Time) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).UTC().Format(TsLayout)) +} + +// UnmarshalJSON implements js.Unmarshaler interface. +// The time is expected to be a quoted string in TsLayout +// format. +func (t *Time) UnmarshalJSON(data []byte) (err error) { + if data[0] != []byte(`"`)[0] || data[len(data)-1] != []byte(`"`)[0] { + return errors.New("Not quoted") + } + *t, err = ParseTime(string(data[1 : len(data)-1])) + return +} + +// ParseTime parses a time in the TsLayout format. +func ParseTime(timespec string) (Time, error) { + t, err := time.Parse(TsLayout, timespec) + return Time(t), err +} + +// MustParseTime is a convenience equivalent of the ParseTime function +// that panics in case of errors. 
+func MustParseTime(timespec string) Time { + ts, err := ParseTime(timespec) + if err != nil { + panic(err) + } + return ts +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime_test.go new file mode 100644 index 00000000000..fe08525926e --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/datetime_test.go @@ -0,0 +1,101 @@ +package common + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestParseTime(t *testing.T) { + + type inputOutput struct { + Input string + Output time.Time + } + + tests := []inputOutput{ + inputOutput{ + Input: "2015-01-24T14:06:05.071Z", + Output: time.Date(2015, time.January, 24, 14, 06, 05, 71*1e6, time.UTC), + }, + inputOutput{ + Input: "2015-03-01T11:19:05.112Z", + Output: time.Date(2015, time.March, 1, 11, 19, 05, 112*1e6, time.UTC), + }, + inputOutput{ + Input: "2015-02-28T11:19:05.112Z", + Output: time.Date(2015, time.February, 28, 11, 19, 05, 112*1e6, time.UTC), + }, + // Golang time pkg happily parses 'wrong' dates like these. + // Just to have in mind. + inputOutput{ + Input: "2015-02-29T11:19:05.112Z", + Output: time.Date(2015, time.March, 01, 11, 19, 05, 112*1e6, time.UTC), + }, + inputOutput{ + Input: "2015-03-31T11:19:05.112Z", + Output: time.Date(2015, time.March, 31, 11, 19, 05, 112*1e6, time.UTC), + }, + inputOutput{ + Input: "2015-04-31T11:19:05.112Z", + Output: time.Date(2015, time.April, 31, 11, 19, 05, 112*1e6, time.UTC), + }, + } + + for _, test := range tests { + result, err := ParseTime(test.Input) + assert.Nil(t, err) + assert.Equal(t, test.Output, time.Time(result)) + } +} + +func TestParseTimeNegative(t *testing.T) { + type inputOutput struct { + Input string + Err string + } + + tests := []inputOutput{ + inputOutput{ + Input: "2015-02-29TT14:06:05.071Z", + Err: "parsing time \"2015-02-29TT14:06:05.071Z\" as \"2006-01-02T15:04:05.000Z\": cannot parse \"T14:06:05.071Z\" as \"15\"", + }, + } + + for _, test := range tests { + _, err := ParseTime(test.Input) + assert.NotNil(t, err) + assert.Equal(t, test.Err, err.Error()) + } +} + +func TestTimeMarshal(t *testing.T) { + type inputOutput struct { + Input MapStr + Output string + } + + tests := []inputOutput{ + inputOutput{ + Input: MapStr{ + "timestamp": Time(time.Date(2015, time.March, 01, 11, 19, 05, 112*1e6, time.UTC)), + }, + Output: `{"timestamp":"2015-03-01T11:19:05.112Z"}`, + }, + inputOutput{ + Input: MapStr{ + "timestamp": MustParseTime("2015-03-01T11:19:05.112Z"), + "another": MustParseTime("2015-03-01T14:19:05.112Z"), + }, + Output: `{"another":"2015-03-01T14:19:05.112Z","timestamp":"2015-03-01T11:19:05.112Z"}`, + }, + } + + for _, test := range tests { + result, err := json.Marshal(test.Input) + assert.Nil(t, err) + assert.Equal(t, test.Output, string(result)) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_unix.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_unix.go new file mode 100644 index 00000000000..c8146583563 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package droppriv + +import ( + "errors" + "fmt" + "syscall" + + "github.com/elastic/libbeat/logp" +) + +type RunOptions struct { + Uid *int + Gid *int +} + +func DropPrivileges(config RunOptions) error { + var err error + + if config.Uid == nil { + // not found, no dropping 
privileges but no err + return nil + } + + if config.Gid == nil { + return errors.New("GID must be specified for dropping privileges") + } + + logp.Info("Switching to user: %d.%d", config.Uid, config.Gid) + + if err = syscall.Setgid(*config.Gid); err != nil { + return fmt.Errorf("setgid: %s", err.Error()) + } + + if err = syscall.Setuid(*config.Uid); err != nil { + return fmt.Errorf("setuid: %s", err.Error()) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_windows.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_windows.go new file mode 100644 index 00000000000..136c80ef648 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/droppriv/droppriv_windows.go @@ -0,0 +1,18 @@ +package droppriv + +import "errors" + +type RunOptions struct { + Uid *int + Gid *int +} + +func DropPrivileges(config RunOptions) error { + + if config.Uid == nil { + // not found, no dropping privileges but no err + return nil + } + + return errors.New("Dropping privileges is not supported on Windows") +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/endpoint.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/endpoint.go new file mode 100644 index 00000000000..d25445fdf7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/endpoint.go @@ -0,0 +1,10 @@ +package common + +// Representing an endpoint in the communication. +type Endpoint struct { + Ip string + Port uint16 + Name string + Cmdline string + Proc string +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/geolite.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/geolite.go new file mode 100644 index 00000000000..bde6dd9adfb --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/geolite.go @@ -0,0 +1,63 @@ +package common + +import ( + "os" + "path/filepath" + + "github.com/elastic/libbeat/logp" + + "github.com/nranchev/go-libGeoIP" +) + +type Geoip struct { + Paths *[]string +} + +func LoadGeoIPData(config Geoip) *libgeo.GeoIP { + + geoip_paths := []string{ + "/usr/share/GeoIP/GeoIP.dat", + "/usr/local/var/GeoIP/GeoIP.dat", + } + if config.Paths != nil { + geoip_paths = *config.Paths + } + if len(geoip_paths) == 0 { + // disabled + return nil + } + + // look for the first existing path + var geoip_path string + for _, path := range geoip_paths { + fi, err := os.Lstat(path) + if err != nil { + continue + } + + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // follow symlink + geoip_path, err = filepath.EvalSymlinks(path) + if err != nil { + logp.Warn("Could not load GeoIP data: %s", err.Error()) + return nil + } + } else { + geoip_path = path + } + break + } + + if len(geoip_path) == 0 { + logp.Warn("Couldn't load GeoIP database") + return nil + } + + geoLite, err := libgeo.Load(geoip_path) + if err != nil { + logp.Warn("Could not load GeoIP data: %s", err.Error()) + } + + logp.Info("Loaded GeoIP data from: %s", geoip_path) + return geoLite +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr.go new file mode 100644 index 00000000000..6d3262495d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr.go @@ -0,0 +1,81 @@ +package common + +import ( + "encoding/json" + "fmt" + "time" +) + +// Commonly used map of things, used in JSON creation and the like. 
+type MapStr map[string]interface{} + +// MapStrUnion creates a new MapStr containing the union of the +// key-value pairs of the two maps. If the same key is present in +// both, the key-value pairs from dict2 overwrite the ones from dict1. +func MapStrUnion(dict1 MapStr, dict2 MapStr) MapStr { + dict := MapStr{} + + for k, v := range dict1 { + dict[k] = v + } + + for k, v := range dict2 { + dict[k] = v + } + return dict +} + +// Update copies all the key-value pairs from the +// d map overwriting any existing keys. +func (m MapStr) Update(d MapStr) { + for k, v := range d { + m[k] = v + } +} + +// Checks if a timestamp field exists and if it doesn't it adds +// one by using the injected now() function as a time source. +func (m MapStr) EnsureTimestampField(now func() time.Time) error { + ts, exists := m["timestamp"] + if !exists { + m["timestamp"] = Time(now()) + return nil + } + + _, is_common_time := ts.(Time) + if is_common_time { + // already perfect + return nil + } + + tstime, is_time := ts.(time.Time) + if is_time { + m["timestamp"] = Time(tstime) + return nil + } + + tsstr, is_string := ts.(string) + if is_string { + var err error + m["timestamp"], err = ParseTime(tsstr) + return err + } + return fmt.Errorf("Don't know how to convert %v to a Time value", ts) +} + +func (m MapStr) EnsureCountField() error { + _, exists := m["count"] + if !exists { + m["count"] = 1 + } + return nil +} + +// Prints the dict as a json +func (m MapStr) String() string { + bytes, err := json.Marshal(m) + if err != nil { + return fmt.Sprintf("Not valid json: %v", err) + } + return string(bytes) +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr_test.go new file mode 100644 index 00000000000..723609265f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/mapstr_test.go @@ -0,0 +1,187 @@ +package common + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestMapStrUpdate(t *testing.T) { + assert := assert.New(t) + + a := MapStr{ + "a": 1, + "b": 2, + } + b := MapStr{ + "b": 3, + "c": 4, + } + + a.Update(b) + + assert.Equal(a, MapStr{"a": 1, "b": 3, "c": 4}) +} + +func TestMapStrUnion(t *testing.T) { + assert := assert.New(t) + + a := MapStr{ + "a": 1, + "b": 2, + } + b := MapStr{ + "b": 3, + "c": 4, + } + + c := MapStrUnion(a, b) + + assert.Equal(c, MapStr{"a": 1, "b": 3, "c": 4}) +} + +func TestEnsureTimestampField(t *testing.T) { + + type io struct { + Input MapStr + Output MapStr + } + + tests := []io{ + // should add a timestamp field if it doesn't exists. 
+		io{
+			Input: MapStr{},
+			Output: MapStr{
+				"timestamp": MustParseTime("2015-03-01T12:34:56.123Z"),
+			},
+		},
+		// should convert from string to Time
+		io{
+			Input: MapStr{"timestamp": "2015-03-01T12:34:57.123Z"},
+			Output: MapStr{
+				"timestamp": MustParseTime("2015-03-01T12:34:57.123Z"),
+			},
+		},
+		// should convert from time.Time to Time
+		io{
+			Input: MapStr{
+				"timestamp": time.Date(2015, time.March, 01,
+					12, 34, 57, 123*1e6, time.UTC),
+			},
+			Output: MapStr{
+				"timestamp": MustParseTime("2015-03-01T12:34:57.123Z"),
+			},
+		},
+		// should leave a Time alone
+		io{
+			Input: MapStr{
+				"timestamp": MustParseTime("2015-03-01T12:34:57.123Z"),
+			},
+			Output: MapStr{
+				"timestamp": MustParseTime("2015-03-01T12:34:57.123Z"),
+			},
+		},
+	}
+
+	now := func() time.Time {
+		return time.Date(2015, time.March, 01, 12, 34, 56, 123*1e6, time.UTC)
+	}
+
+	for _, test := range tests {
+		m := test.Input
+		err := m.EnsureTimestampField(now)
+		assert.Nil(t, err)
+		assert.Equal(t, test.Output, m)
+	}
+}
+
+func TestEnsureTimestampFieldNegative(t *testing.T) {
+
+	inputs := []MapStr{
+		// should error on invalid string layout (microseconds)
+		MapStr{
+			"timestamp": "2015-03-01T12:34:57.123456Z",
+		},
+		// should error when the timestamp is an integer
+		MapStr{
+			"timestamp": 123456678,
+		},
+	}
+
+	now := func() time.Time {
+		return time.Date(2015, time.March, 01, 12, 34, 56, 123*1e6, time.UTC)
+	}
+
+	for _, input := range inputs {
+		m := input
+		err := m.EnsureTimestampField(now)
+		assert.NotNil(t, err)
+	}
+}
+
+func TestEnsureCountField(t *testing.T) {
+	type io struct {
+		Input  MapStr
+		Output MapStr
+	}
+	tests := []io{
+		// should add a count field if there is none
+		io{
+			Input: MapStr{
+				"a": "b",
+			},
+			Output: MapStr{
+				"a":     "b",
+				"count": 1,
+			},
+		},
+
+		// should do nothing if there is already a count
+		io{
+			Input: MapStr{
+				"count": 1,
+			},
+			Output: MapStr{
+				"count": 1,
+			},
+		},
+
+		// should add count on an empty dict
+		io{
+			Input:  MapStr{},
+			Output: MapStr{"count": 1},
+		},
+	}
+
+	for _, test := range tests {
+		m := test.Input
+		err := m.EnsureCountField()
+		assert.Nil(t, err)
+		assert.Equal(t, test.Output, m)
+	}
+}
+
+func TestString(t *testing.T) {
+	type io struct {
+		Input  MapStr
+		Output string
+	}
+	tests := []io{
+		io{
+			Input: MapStr{
+				"a": "b",
+			},
+			Output: `{"a":"b"}`,
+		},
+		io{
+			Input: MapStr{
+				"a": []int{1, 2, 3},
+			},
+			Output: `{"a":[1,2,3]}`,
+		},
+	}
+	for _, test := range tests {
+		assert.Equal(t, test.Output, test.Input.String())
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/net.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/net.go
new file mode 100644
index 00000000000..d8c1b30cd2b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/net.go
@@ -0,0 +1,50 @@
+package common
+
+import (
+	"fmt"
+	"net"
+)
+
+// LocalIpAddrs finds the IP addresses of the host the shipper
+// currently runs on.
+func LocalIpAddrs() ([]net.IP, error) {
+	var localAddrs = []net.IP{}
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return []net.IP{}, err
+	}
+	for _, addr := range addrs {
+		if ipnet, ok := addr.(*net.IPNet); ok {
+			localAddrs = append(localAddrs, ipnet.IP)
+		}
+	}
+	return localAddrs, nil
+}
+
+// LocalIpAddrsAsStrings finds the IP addresses of the host the
+// shipper currently runs on and returns them as an array of
+// strings.
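+//
+// A sketch of a typical call, skipping loopback addresses; the printed
+// values are of course host-specific:
+//
+//	addrs, err := LocalIpAddrsAsStrings(false)
+//	if err != nil {
+//		// interface enumeration failed
+//	}
+//	for _, addr := range addrs {
+//		fmt.Println(addr) // e.g. "192.168.0.5"
+//	}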
+func LocalIpAddrsAsStrings(include_loopbacks bool) ([]string, error) { + var localAddrsStrings = []string{} + var err error + ipaddrs, err := LocalIpAddrs() + if err != nil { + return []string{}, err + } + for _, ipaddr := range ipaddrs { + if include_loopbacks || !ipaddr.IsLoopback() { + localAddrsStrings = append(localAddrsStrings, ipaddr.String()) + } + } + return localAddrsStrings, err +} + +// IsLoopback check if a particular IP notation corresponds +// to a loopback interface. +func IsLoopback(ip_str string) (bool, error) { + ip := net.ParseIP(ip_str) + if ip == nil { + return false, fmt.Errorf("Wrong IP format %s", ip_str) + } + return ip.IsLoopback(), nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/statuses.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/statuses.go new file mode 100644 index 00000000000..dd2b2b6c8da --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/statuses.go @@ -0,0 +1,7 @@ +package common + +// standardized status values +const ( + OK_STATUS = "OK" + ERROR_STATUS = "Error" +) diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples.go new file mode 100644 index 00000000000..cb428a79ff7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples.go @@ -0,0 +1,135 @@ +package common + +import ( + "fmt" + "net" +) + +// In order for the IpPortTuple and the TcpTuple to be used as +// hashtable keys, they need to have a fixed size. This means the +// net.IP is problematic because it's internally represented as a slice. +// We're introducing the HashableIpPortTuple and the HashableTcpTuple +// types which are internally simple byte arrays. + +const MaxIpPortTupleRawSize = 16 + 16 + 2 + 2 + +type HashableIpPortTuple [MaxIpPortTupleRawSize]byte + +type IpPortTuple struct { + Ip_length int + Src_ip, Dst_ip net.IP + Src_port, Dst_port uint16 + + raw HashableIpPortTuple // Src_ip:Src_port:Dst_ip:Dst_port + revRaw HashableIpPortTuple // Dst_ip:Dst_port:Src_ip:Src_port +} + +func NewIpPortTuple(ip_length int, src_ip net.IP, src_port uint16, + dst_ip net.IP, dst_port uint16) IpPortTuple { + + tuple := IpPortTuple{ + Ip_length: ip_length, + Src_ip: src_ip, + Dst_ip: dst_ip, + Src_port: src_port, + Dst_port: dst_port, + } + tuple.ComputeHashebles() + + return tuple +} + +func (t *IpPortTuple) ComputeHashebles() { + copy(t.raw[0:16], t.Src_ip) + copy(t.raw[16:18], []byte{byte(t.Src_port >> 8), byte(t.Src_port)}) + copy(t.raw[18:34], t.Dst_ip) + copy(t.raw[34:36], []byte{byte(t.Dst_port >> 8), byte(t.Dst_port)}) + + copy(t.revRaw[0:16], t.Dst_ip) + copy(t.revRaw[16:18], []byte{byte(t.Dst_port >> 8), byte(t.Dst_port)}) + copy(t.revRaw[18:34], t.Src_ip) + copy(t.revRaw[34:36], []byte{byte(t.Src_port >> 8), byte(t.Src_port)}) +} + +func (t *IpPortTuple) String() string { + return fmt.Sprintf("IpPortTuple src[%s:%d] dst[%s:%d]", + t.Src_ip.String(), + t.Src_port, + t.Dst_ip.String(), + t.Dst_port) +} + +// Hashable returns a hashable value that uniquely identifies +// the IP-port tuple. +func (t *IpPortTuple) Hashable() HashableIpPortTuple { + return t.raw +} + +// Hashable returns a hashable value that uniquely identifies +// the IP-port tuple after swapping the source and destination. 
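+//
+// Together with Hashable this allows one table to be queried from both
+// directions of a flow; a sketch with made-up addresses:
+//
+//	conns := map[HashableIpPortTuple]string{}
+//	fwd := NewIpPortTuple(4, net.IPv4(192, 168, 0, 1), 9200, net.IPv4(192, 168, 0, 2), 9201)
+//	conns[fwd.Hashable()] = "state"
+//	rev := NewIpPortTuple(4, net.IPv4(192, 168, 0, 2), 9201, net.IPv4(192, 168, 0, 1), 9200)
+//	_ = conns[rev.RevHashable()] // finds "state" again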
+func (t *IpPortTuple) RevHashable() HashableIpPortTuple { + return t.revRaw +} + +const MaxTcpTupleRawSize = 16 + 16 + 2 + 2 + 4 + +type HashableTcpTuple [MaxTcpTupleRawSize]byte + +type TcpTuple struct { + Ip_length int + Src_ip, Dst_ip net.IP + Src_port, Dst_port uint16 + Stream_id uint32 + + raw HashableTcpTuple // Src_ip:Src_port:Dst_ip:Dst_port:stream_id +} + +func TcpTupleFromIpPort(t *IpPortTuple, tcp_id uint32) TcpTuple { + tuple := TcpTuple{ + Ip_length: t.Ip_length, + Src_ip: t.Src_ip, + Dst_ip: t.Dst_ip, + Src_port: t.Src_port, + Dst_port: t.Dst_port, + Stream_id: tcp_id, + } + tuple.ComputeHashebles() + + return tuple +} + +func (t *TcpTuple) ComputeHashebles() { + copy(t.raw[0:16], t.Src_ip) + copy(t.raw[16:18], []byte{byte(t.Src_port >> 8), byte(t.Src_port)}) + copy(t.raw[18:34], t.Dst_ip) + copy(t.raw[34:36], []byte{byte(t.Dst_port >> 8), byte(t.Dst_port)}) + copy(t.raw[36:40], []byte{byte(t.Stream_id >> 24), byte(t.Stream_id >> 16), + byte(t.Stream_id >> 8), byte(t.Stream_id)}) +} + +func (t TcpTuple) String() string { + return fmt.Sprintf("TcpTuple src[%s:%d] dst[%s:%d] stream_id[%d]", + t.Src_ip.String(), + t.Src_port, + t.Dst_ip.String(), + t.Dst_port, + t.Stream_id) +} + +// Returns a pointer to the equivalent IpPortTuple. +func (t TcpTuple) IpPort() *IpPortTuple { + ipport := NewIpPortTuple(t.Ip_length, t.Src_ip, t.Src_port, + t.Dst_ip, t.Dst_port) + return &ipport +} + +// Hashable() returns a hashable value that uniquely identifies +// the TCP tuple. +func (t *TcpTuple) Hashable() HashableTcpTuple { + return t.raw +} + +// Source and destination process names, as found by the proc module. +type CmdlineTuple struct { + Src, Dst []byte +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples_test.go new file mode 100644 index 00000000000..bdacaa087cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/common/tuples_test.go @@ -0,0 +1,71 @@ +package common + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTuples_tuples_ipv4(t *testing.T) { + assert := assert.New(t) + + var tuple IpPortTuple + + // from net/ip.go + var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff} + + tuple = NewIpPortTuple(4, net.IPv4(192, 168, 0, 1), 9200, net.IPv4(192, 168, 0, 2), 9201) + + assert.Equal(v4InV6Prefix, tuple.raw[0:12], "prefix_src") + assert.Equal([]byte{192, 168, 0, 1}, tuple.raw[12:16], "src_ip") + assert.Equal([]byte{0x23, 0xf0}, tuple.raw[16:18], "src_port") + + assert.Equal(v4InV6Prefix, tuple.raw[18:30], "prefix_dst") + assert.Equal([]byte{192, 168, 0, 2}, tuple.raw[30:34], "dst_ip") + assert.Equal([]byte{0x23, 0xf1}, tuple.raw[34:36], "dst_port") + assert.Equal(36, len(tuple.raw)) + + assert.Equal(v4InV6Prefix, tuple.revRaw[0:12], "rev prefix_dst") + assert.Equal([]byte{192, 168, 0, 2}, tuple.revRaw[12:16], "rev dst_ip") + assert.Equal([]byte{0x23, 0xf1}, tuple.revRaw[16:18], "rev dst_port") + + assert.Equal(v4InV6Prefix, tuple.revRaw[18:30], "rev prefix_src") + assert.Equal([]byte{192, 168, 0, 1}, tuple.revRaw[30:34], "rev src_ip") + assert.Equal([]byte{0x23, 0xf0}, tuple.revRaw[34:36], "rev src_port") + assert.Equal(36, len(tuple.revRaw)) + + tcp_tuple := TcpTupleFromIpPort(&tuple, 1) + assert.Equal(tuple.raw[:], tcp_tuple.raw[0:36], "Wrong TCP tuple hashable") + assert.Equal([]byte{0, 0, 0, 1}, tcp_tuple.raw[36:40], "stream_id") +} + +func TestTuples_tuples_ipv6(t *testing.T) { + assert := assert.New(t) + + var 
tuple IpPortTuple + + tuple = NewIpPortTuple(16, net.ParseIP("2001:db8::1"), + 9200, net.ParseIP("2001:db8::123:12:1"), 9201) + + ip1 := []byte{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x1} + ip2 := []byte{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0, 0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1} + + assert.Equal(ip1, tuple.raw[0:16], "src_ip") + assert.Equal([]byte{0x23, 0xf0}, tuple.raw[16:18], "src_port") + + assert.Equal(ip2, tuple.raw[18:34], "dst_ip") + assert.Equal([]byte{0x23, 0xf1}, tuple.raw[34:36], "dst_port") + assert.Equal(36, len(tuple.raw)) + + assert.Equal(ip2, tuple.revRaw[0:16], "rev dst_ip") + assert.Equal([]byte{0x23, 0xf1}, tuple.revRaw[16:18], "rev dst_port") + + assert.Equal(ip1, tuple.revRaw[18:34], "rev src_ip") + assert.Equal([]byte{0x23, 0xf0}, tuple.revRaw[34:36], "rev src_port") + assert.Equal(36, len(tuple.revRaw)) + + tcp_tuple := TcpTupleFromIpPort(&tuple, 1) + assert.Equal(tuple.raw[:], tcp_tuple.raw[0:36], "Wrong TCP tuple hashable") + assert.Equal([]byte{0, 0, 0, 1}, tcp_tuple.raw[36:40], "stream_id") +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator.go new file mode 100644 index 00000000000..af8906384b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator.go @@ -0,0 +1,162 @@ +package logp + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" +) + +const RotatorMaxFiles = 1024 +const DefaultKeepFiles = 7 +const DefaultRotateEveryBytes = 10 * 1024 * 1024 + +type FileRotator struct { + Path string + Name string + RotateEveryBytes *uint64 + KeepFiles *int + + current *os.File + current_size uint64 +} + +func (rotator *FileRotator) CreateDirectory() error { + fileinfo, err := os.Stat(rotator.Path) + if err == nil { + if !fileinfo.IsDir() { + return fmt.Errorf("%s exists but it's not a directory", rotator.Path) + } + } + + if os.IsNotExist(err) { + err = os.MkdirAll(rotator.Path, 0755) + if err != nil { + return err + } + } + + return nil +} + +func (rotator *FileRotator) CheckIfConfigSane() error { + if len(rotator.Name) == 0 { + return fmt.Errorf("File logging requires a name for the file names") + } + if rotator.KeepFiles == nil { + rotator.KeepFiles = new(int) + *rotator.KeepFiles = DefaultKeepFiles + } + if rotator.RotateEveryBytes == nil { + rotator.RotateEveryBytes = new(uint64) + *rotator.RotateEveryBytes = DefaultRotateEveryBytes + } + + if *rotator.KeepFiles < 2 || *rotator.KeepFiles >= RotatorMaxFiles { + return fmt.Errorf("The number of files to keep should be between 2 and %d", RotatorMaxFiles-1) + } + return nil +} + +func (rotator *FileRotator) WriteLine(line []byte) error { + if rotator.shouldRotate() { + err := rotator.Rotate() + if err != nil { + return err + } + } + _, err := rotator.current.Write(line) + if err != nil { + return err + } + _, err = rotator.current.Write([]byte("\n")) + if err != nil { + return err + } + rotator.current_size += uint64(len(line) + 1) + + return nil +} + +func (rotator *FileRotator) shouldRotate() bool { + if rotator.current == nil { + return true + } + + if rotator.current_size >= *rotator.RotateEveryBytes { + return true + } + + return false +} + +func (rotator *FileRotator) FilePath(file_no int) string { + if file_no == 0 { + return filepath.Join(rotator.Path, rotator.Name) + } + filename := strings.Join([]string{rotator.Name, strconv.Itoa(file_no)}, ".") + return filepath.Join(rotator.Path, filename) +} + +func (rotator *FileRotator) FileExists(file_no int) bool { 
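+	// Stat the file for this rotation slot; only a confirmed
+	// "does not exist" counts as missing.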
+	file_path := rotator.FilePath(file_no)
+	_, err := os.Stat(file_path)
+	if os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
+
+func (rotator *FileRotator) Rotate() error {
+
+	if rotator.current != nil {
+		if err := rotator.current.Close(); err != nil {
+			return err
+		}
+	}
+
+	// delete any extra files, normally we shouldn't have any
+	for file_no := *rotator.KeepFiles; file_no < RotatorMaxFiles; file_no++ {
+		if rotator.FileExists(file_no) {
+			perr := os.Remove(rotator.FilePath(file_no))
+			if perr != nil {
+				return perr
+			}
+		}
+	}
+
+	// shift all files from last to first
+	for file_no := *rotator.KeepFiles - 1; file_no >= 0; file_no-- {
+		if !rotator.FileExists(file_no) {
+			// file doesn't exist, don't rotate
+			continue
+		}
+		file_path := rotator.FilePath(file_no)
+
+		if rotator.FileExists(file_no + 1) {
+			// next file exists, something is strange
+			return fmt.Errorf("File %s exists, when rotating would overwrite it", rotator.FilePath(file_no+1))
+		}
+
+		err := os.Rename(file_path, rotator.FilePath(file_no+1))
+		if err != nil {
+			return err
+		}
+	}
+
+	// create the new file
+	file_path := rotator.FilePath(0)
+	current, err := os.Create(file_path)
+	if err != nil {
+		return err
+	}
+	rotator.current = current
+	rotator.current_size = 0
+
+	// delete the extra file, ignore errors here
+	file_path = rotator.FilePath(*rotator.KeepFiles)
+	os.Remove(file_path)
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator_test.go
new file mode 100644
index 00000000000..7602a354484
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/file_rotator_test.go
@@ -0,0 +1,161 @@
+package logp
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_Rotator(t *testing.T) {
+
+	if testing.Verbose() {
+		LogInit(LOG_DEBUG, "", false, true, []string{"rotator"})
+	}
+
+	dir, err := ioutil.TempDir("", "test_rotator_")
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	Debug("rotator", "Directory: %s", dir)
+
+	rotateeverybytes := uint64(1000)
+	keepfiles := 3
+
+	rotator := FileRotator{
+		Path:             dir,
+		Name:             "packetbeat",
+		RotateEveryBytes: &rotateeverybytes,
+		KeepFiles:        &keepfiles,
+	}
+
+	err = rotator.Rotate()
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	if _, err = os.Stat(filepath.Join(dir, "packetbeat")); os.IsNotExist(err) {
+		t.Errorf("File %s doesn't exist", filepath.Join(dir, "packetbeat"))
+	}
+
+	if err = rotator.WriteLine([]byte("1")); err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	err = rotator.Rotate()
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	if err = rotator.WriteLine([]byte("2")); err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	err = rotator.Rotate()
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	if err = rotator.WriteLine([]byte("3")); err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	err = rotator.Rotate()
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	if err = rotator.WriteLine([]byte("4")); err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	file_0, err := ioutil.ReadFile(rotator.FilePath(0))
+	if err != nil || !bytes.Equal(file_0, []byte("4\n")) {
+		t.Errorf("Wrong contents of file 0: %s, expected: %s", string(file_0), "4")
+	}
+
+	file_1, err := ioutil.ReadFile(rotator.FilePath(1))
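+	// WriteLine appends a trailing newline, so after the rotations above
+	// slot 0 holds "4\n", slot 1 "3\n" and slot 2 "2\n".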
+	if err != nil || !bytes.Equal(file_1, []byte("3\n")) {
+		t.Errorf("Wrong contents of file 1: %s", string(file_1))
+	}
+
+	file_2, err := ioutil.ReadFile(rotator.FilePath(2))
+	if err != nil || !bytes.Equal(file_2, []byte("2\n")) {
+		t.Errorf("Wrong contents of file 2: %s", string(file_2))
+	}
+
+	if rotator.FileExists(3) {
+		t.Errorf("File path %s shouldn't exist", rotator.FilePath(3))
+	}
+
+	os.RemoveAll(dir)
+}
+
+func Test_Rotator_By_Bytes(t *testing.T) {
+
+	if testing.Verbose() {
+		LogInit(LOG_DEBUG, "", false, true, []string{"rotator"})
+	}
+
+	dir, err := ioutil.TempDir("", "test_rotator_")
+	if err != nil {
+		t.Errorf("Error: %s", err.Error())
+		return
+	}
+
+	Debug("rotator", "Directory: %s", dir)
+
+	rotateeverybytes := uint64(100)
+	keepfiles := 3
+
+	rotator := FileRotator{
+		Path:             dir,
+		Name:             "packetbeat",
+		RotateEveryBytes: &rotateeverybytes,
+		KeepFiles:        &keepfiles,
+	}
+
+	for i := 0; i < 300; i++ {
+		rotator.WriteLine([]byte("01234567890"))
+	}
+}
+
+func TestConfigSane(t *testing.T) {
+	rotator := FileRotator{
+		Name: "test",
+	}
+	assert.Nil(t, rotator.CheckIfConfigSane())
+
+	keepfiles := 1023
+	rotator = FileRotator{
+		Name:      "test",
+		KeepFiles: &keepfiles,
+	}
+	assert.Nil(t, rotator.CheckIfConfigSane())
+
+	keepfiles = 10000
+	rotator = FileRotator{
+		Name:      "test",
+		KeepFiles: &keepfiles,
+	}
+	assert.NotNil(t, rotator.CheckIfConfigSane())
+
+	rotator = FileRotator{
+		Name: "",
+	}
+	assert.NotNil(t, rotator.CheckIfConfigSane())
+
+}
diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/log.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/log.go
new file mode 100644
index 00000000000..b6dad20e910
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/log.go
@@ -0,0 +1,168 @@
+package logp
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"runtime/debug"
+)
+
+type Priority int
+
+const (
+	// From /usr/include/sys/syslog.h.
+	// These are the same on Linux, BSD, and OS X.
+	LOG_EMERG Priority = iota
+	LOG_ALERT
+	LOG_CRIT
+	LOG_ERR
+	LOG_WARNING
+	LOG_NOTICE
+	LOG_INFO
+	LOG_DEBUG
+)
+
+type Logger struct {
+	toSyslog            bool
+	toStderr            bool
+	toFile              bool
+	level               Priority
+	selectors           map[string]bool
+	debug_all_selectors bool
+
+	logger  *log.Logger
+	syslog  [LOG_DEBUG + 1]*log.Logger
+	rotator *FileRotator
+}
+
+var _log Logger
+
+func Debug(selector string, format string, v ...interface{}) {
+	if _log.level >= LOG_DEBUG {
+		if !_log.debug_all_selectors {
+			selected := _log.selectors[selector]
+			if !selected {
+				return
+			}
+		}
+		if _log.toSyslog {
+			_log.syslog[LOG_INFO].Output(2, fmt.Sprintf(format, v...))
+		}
+		if _log.toStderr {
+			_log.logger.Output(2, fmt.Sprintf("DBG "+format, v...))
+		}
+		if _log.toFile {
+			_log.rotator.WriteLine([]byte(fmt.Sprintf("DBG "+format, v...)))
+		}
+	}
+}
+
+func IsDebug(selector string) bool {
+	return _log.selectors[selector]
+}
+
+func msg(level Priority, prefix string, format string, v ...interface{}) {
+	if _log.level >= level {
+		if _log.toSyslog {
+			_log.syslog[level].Output(3, fmt.Sprintf(format, v...))
+		}
+		if _log.toStderr {
+			_log.logger.Output(3, fmt.Sprintf(prefix+format, v...))
+		}
+		if _log.toFile {
+			_log.rotator.WriteLine([]byte(fmt.Sprintf(prefix+format, v...)))
+		}
+	}
+}
+
+func Info(format string, v ...interface{}) {
+	msg(LOG_INFO, "INFO ", format, v...)
+}
+
+func Warn(format string, v ...interface{}) {
+	msg(LOG_WARNING, "WARN ", format, v...)
+}
+
+func Err(format string, v ...interface{}) {
+	msg(LOG_ERR, "ERR ", format, v...)
+}
+
+func Critical(format string, v ...interface{}) {
+	msg(LOG_CRIT, "CRIT ", format, v...)
+}
+
+// WTF prints the message at CRIT level and panics immediately with the same
+// message.
+func WTF(format string, v ...interface{}) {
+	msg(LOG_CRIT, "CRIT ", format, v...)
+	panic(fmt.Sprintf(format, v...))
+}
+
+func Recover(msg string) {
+	if r := recover(); r != nil {
+		Err("%s. Recovering, but please report this: %s.", msg, r)
+		Err("Stacktrace: %s", debug.Stack())
+	}
+}
+
+// TODO: remove toSyslog and toStderr from the init function
+func LogInit(level Priority, prefix string, toSyslog bool, toStderr bool, debugSelectors []string) {
+	_log.toSyslog = toSyslog
+	_log.toStderr = toStderr
+	_log.level = level
+
+	_log.selectors = make(map[string]bool)
+	for _, selector := range debugSelectors {
+		_log.selectors[selector] = true
+		if selector == "*" {
+			_log.debug_all_selectors = true
+		}
+	}
+
+	if _log.toSyslog {
+		for prio := LOG_EMERG; prio <= LOG_DEBUG; prio++ {
+			_log.syslog[prio] = openSyslog(prio, prefix)
+			if _log.syslog[prio] == nil {
+				// syslog not available
+				_log.toSyslog = false
+				break
+			}
+		}
+	}
+	if _log.toStderr {
+		_log.logger = log.New(os.Stdout, prefix, log.Lshortfile)
+	}
+}
+
+func SetToStderr(toStderr bool, prefix string) {
+	_log.toStderr = toStderr
+	if _log.toStderr {
+		_log.logger = log.New(os.Stdout, prefix, log.Lshortfile)
+	}
+}
+
+func SetToSyslog(toSyslog bool, prefix string) {
+	_log.toSyslog = toSyslog
+	if _log.toSyslog {
+		for prio := LOG_EMERG; prio <= LOG_DEBUG; prio++ {
+			_log.syslog[prio] = openSyslog(prio, prefix)
+		}
+	}
+}
+
+func SetToFile(toFile bool, rotator *FileRotator) error {
+	_log.toFile = toFile
+	if _log.toFile {
+		_log.rotator = rotator
+
+		err := rotator.CreateDirectory()
+		if err != nil {
+			return err
+		}
+		err = rotator.CheckIfConfigSane()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/logp.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/logp.go
new file mode 100644
index 00000000000..85d7f0ccf9b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/logp.go
@@ -0,0 +1,114 @@
+package logp
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"runtime"
+	"strings"
+)
+
+// cmd line flags
+var verbose *bool
+var toStderr *bool
+var debugSelectorsStr *string
+
+type Logging struct {
+	Selectors []string
+	Files     *FileRotator
+	To_syslog *bool
+	To_files  *bool
+}
+
+// Init combines the configuration from config with the command line
+// flags to initialize the Logging systems. After calling this function,
+// standard output is always enabled. You can make it respect the command
+// line flag with a later SetStderr call.
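+//
+// A typical startup sequence; CmdLineFlags must run before Init, because
+// Init reads the registered flags ("mybeat" is just a placeholder name):
+//
+//	CmdLineFlags(flag.CommandLine)
+//	flag.Parse()
+//	config := Logging{} // nil fields fall back to the per-OS defaults
+//	if err := Init("mybeat", &config); err != nil {
+//		// bail out; file logging could not be set up
+//	}
+//	SetStderr() // later, honor -e once startup is done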
+func Init(name string, config *Logging) error {
+
+	logLevel := LOG_ERR
+	if *verbose {
+		logLevel = LOG_INFO
+	}
+
+	debugSelectors := []string{}
+	if len(*debugSelectorsStr) > 0 {
+		debugSelectors = strings.Split(*debugSelectorsStr, ",")
+		logLevel = LOG_DEBUG
+	}
+
+	var defaultToFiles, defaultToSyslog bool
+	if runtime.GOOS == "windows" {
+		// always disabled on windows
+		defaultToSyslog = false
+		defaultToFiles = true
+	} else {
+		defaultToSyslog = true
+		defaultToFiles = false
+	}
+
+	var toSyslog, toFiles bool
+	if config.To_syslog != nil {
+		toSyslog = *config.To_syslog
+	} else {
+		toSyslog = defaultToSyslog
+	}
+	if config.To_files != nil {
+		toFiles = *config.To_files
+	} else {
+		toFiles = defaultToFiles
+	}
+
+	// toStderr disables logging to syslog/files
+	toSyslog = toSyslog && !*toStderr
+	toFiles = toFiles && !*toStderr
+
+	LogInit(Priority(logLevel), "", toSyslog, true, debugSelectors)
+	if len(debugSelectors) > 0 {
+		config.Selectors = debugSelectors
+	}
+
+	if toFiles {
+		if config.Files == nil {
+			if runtime.GOOS == "windows" {
+				config.Files = &FileRotator{
+					Path: fmt.Sprintf("C:\\ProgramData\\%s\\Logs", name),
+					Name: name,
+				}
+			} else {
+				config.Files = &FileRotator{
+					Path: fmt.Sprintf("/var/log/%s", name),
+					Name: name,
+				}
+			}
+		}
+		err := SetToFile(true, config.Files)
+		if err != nil {
+			return err
+		}
+	}
+
+	if !IsDebug("stdlog") {
+		// disable standard logging by default (this is sometimes
+		// used by libraries and we don't want their logs to spam ours)
+		log.SetOutput(ioutil.Discard)
+	}
+
+	return nil
+}
+
+func SetStderr() {
+	if !*toStderr {
+		Info("Startup successful, disable stdout logging")
+		SetToStderr(false, "")
+	}
+}
+
+// Adds logging specific flags to the flag set. The flags used are
+// -v, -e and -d.
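+//
+// The -d selectors gate Debug() by its first argument; "*" enables all of
+// them. A sketch using LogInit directly (the selector names are examples):
+//
+//	LogInit(LOG_DEBUG, "", false, true, []string{"rotator"})
+//	Debug("rotator", "printed, the selector is enabled")
+//	Debug("elasticsearch", "suppressed")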
+func CmdLineFlags(flags *flag.FlagSet) { + verbose = flags.Bool("v", false, "Log at INFO level") + toStderr = flags.Bool("e", false, "Output to stdout and disable syslog/file output") + debugSelectorsStr = flags.String("d", "", "Enable certain debug selectors") +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_unix.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_unix.go new file mode 100644 index 00000000000..760c7123793 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package logp + +import ( + "fmt" + "log" + "log/syslog" +) + +func openSyslog(level Priority, prefix string) *log.Logger { + logger, err := syslog.NewLogger(syslog.Priority(level), log.Lshortfile) + if err != nil { + fmt.Println("Error opening syslog: ", err) + return nil + } + logger.SetPrefix(prefix) + + return logger +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_windows.go b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_windows.go new file mode 100644 index 00000000000..5c09d26c246 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/logp/syslog_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package logp + +import "log" + +func openSyslog(level Priority, prefix string) *log.Logger { + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api.go new file mode 100644 index 00000000000..db2e011ca82 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api.go @@ -0,0 +1,336 @@ +package elasticsearch + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/elastic/libbeat/logp" +) + +type Elasticsearch struct { + MaxRetries int + connectionPool ConnectionPool + client *http.Client +} + +type QueryResult struct { + Ok bool `json:"ok"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Source json.RawMessage `json:"_source"` + Version int `json:"_version"` + Found bool `json:"found"` + Exists bool `json:"exists"` + Created bool `json:"created"` + Matches []string `json:"matches"` +} + +type SearchResults struct { + Took int `json:"took"` + Shards json.RawMessage `json:"_shards"` + Hits Hits `json:"hits"` + Aggs map[string]json.RawMessage `json:"aggregations"` +} + +type Hits struct { + Total int + Hits []json.RawMessage `json:"hits"` +} + +func (r QueryResult) String() string { + out, err := json.Marshal(r) + if err != nil { + return "ERROR" + } + return string(out) +} + +const ( + default_max_retries = 3 +) + +// Create a connection to Elasticsearch +func NewElasticsearch(urls []string, username string, password string) *Elasticsearch { + + var connection_pool ConnectionPool + connection_pool.SetConnections(urls, username, password) + + es := Elasticsearch{ + connectionPool: connection_pool, + client: &http.Client{}, + MaxRetries: default_max_retries, + } + return &es +} + +// Encode parameters in url +func UrlEncode(params map[string]string) string { + var values url.Values = url.Values{} + + for key, val := range params { + values.Add(key, string(val)) + } + return values.Encode() +} + +// Create path out of index, doc_type and id that is used for querying Elasticsearch +func MakePath(index string, doc_type string, id string) (string, error) { + + var path string + if len(doc_type) > 0 { + 
if len(id) > 0 { + path = fmt.Sprintf("/%s/%s/%s", index, doc_type, id) + } else { + path = fmt.Sprintf("/%s/%s", index, doc_type) + } + } else { + if len(id) > 0 { + if len(index) > 0 { + path = fmt.Sprintf("/%s/%s", index, id) + } else { + path = fmt.Sprintf("/%s", id) + } + } else { + path = fmt.Sprintf("/%s", index) + } + } + return path, nil +} + +func ReadQueryResult(obj []byte) (*QueryResult, error) { + + var result QueryResult + if obj == nil { + return nil, nil + } + err := json.Unmarshal(obj, &result) + if err != nil { + return nil, err + } + return &result, err +} + +func ReadSearchResult(obj []byte) (*SearchResults, error) { + + var result SearchResults + if obj == nil { + return nil, nil + } + err := json.Unmarshal(obj, &result) + if err != nil { + return nil, err + } + return &result, err +} + +func (es *Elasticsearch) SetMaxRetries(max_retries int) { + es.MaxRetries = max_retries +} + +func isConnTimeout(err error) bool { + return strings.Contains(err.Error(), "i/o timeout") +} + +func isConnRefused(err error) bool { + return strings.Contains(err.Error(), "connection refused") +} + +// Perform the actual request. If the operation was successful, mark it as live and return the response. +// Mark the Elasticsearch node as dead for a period of time in the case the http request fails with Connection +// Timeout, Connection Refused or returns one of the 503,504 Error Replies. +// It returns the response, if it should retry sending the request and the error +func (es *Elasticsearch) PerformRequest(conn *Connection, req *http.Request) ([]byte, bool, error) { + + req.Header.Add("Accept", "application/json") + if conn.Username != "" || conn.Password != "" { + req.SetBasicAuth(conn.Username, conn.Password) + } + + resp, err := es.client.Do(req) + if err != nil { + // request fails + do_retry := false + if isConnTimeout(err) || isConnRefused(err) { + es.connectionPool.MarkDead(conn) + do_retry = true + } + return nil, do_retry, fmt.Errorf("Sending the request fails: %s", err) + } + + if resp.StatusCode > 299 { + // request fails + do_retry := false + if resp.StatusCode == http.StatusServiceUnavailable || + resp.StatusCode == http.StatusGatewayTimeout { + // status code in {503, 504} + es.connectionPool.MarkDead(conn) + do_retry = true + } + return nil, do_retry, fmt.Errorf("%v", resp.Status) + } + + defer resp.Body.Close() + obj, err := ioutil.ReadAll(resp.Body) + if err != nil { + es.connectionPool.MarkDead(conn) + return nil, true, fmt.Errorf("Reading the response fails: %s", err) + } + + // request with success + es.connectionPool.MarkLive(conn) + + return obj, false, nil + +} + +// Create an HTTP request and send it to Elasticsearch. The request is retransmitted max_retries +// before returning an error. +func (es *Elasticsearch) Request(method string, path string, + params map[string]string, body interface{}) ([]byte, error) { + + var errors []error + + for attempt := 0; attempt < es.MaxRetries; attempt++ { + + conn := es.connectionPool.GetConnection() + logp.Debug("elasticsearch", "Use connection %s", conn.Url) + + url := conn.Url + path + if len(params) > 0 { + url = url + "?" 
+ UrlEncode(params) + } + + logp.Debug("elasticsearch", "%s %s %s", method, url, body) + + var obj []byte + var err error + if body != nil { + obj, err = json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("Fail to JSON encode the body: %s", err) + } + } else { + obj = nil + } + req, err := http.NewRequest(method, url, bytes.NewReader(obj)) + if err != nil { + return nil, fmt.Errorf("NewRequest fails: %s", err) + } + + resp, retry, err := es.PerformRequest(conn, req) + if retry == true { + // retry + if err != nil { + errors = append(errors, err) + } + continue + } + if err != nil { + return nil, err + } + return resp, nil + + } + + logp.Warn("Request fails to be send after %d retries", es.MaxRetries) + + return nil, fmt.Errorf("Request fails after %d retries. Errors: %v", es.MaxRetries, errors) +} + +// Index adds or updates a typed JSON document in a specified index, making it +// searchable. In case id is empty, a new id is created over a HTTP POST request. +// Otherwise, a HTTP PUT request is issued. +// Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html +func (es *Elasticsearch) Index(index string, doc_type string, id string, + params map[string]string, body interface{}) (*QueryResult, error) { + + var method string + + path, err := MakePath(index, doc_type, id) + if err != nil { + return nil, fmt.Errorf("MakePath fails: %s", err) + } + if len(id) == 0 { + method = "POST" + } else { + method = "PUT" + } + resp, err := es.Request(method, path, params, body) + if err != nil { + return nil, err + } + return ReadQueryResult(resp) +} + +// Refresh an index. Call this after doing inserts or creating/deleting +// indexes in unit tests. +func (es *Elasticsearch) Refresh(index string) (*QueryResult, error) { + path, err := MakePath(index, "", "_refresh") + if err != nil { + return nil, err + } + resp, err := es.Request("POST", path, nil, nil) + if err != nil { + return nil, err + } + + return ReadQueryResult(resp) +} + +// Creates a new index, optionally with settings and mappings passed in +// the body. +// Implements: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html +// +func (es *Elasticsearch) CreateIndex(index string, body interface{}) (*QueryResult, error) { + + path, err := MakePath(index, "", "") + if err != nil { + return nil, err + } + + resp, err := es.Request("PUT", path, nil, body) + if err != nil { + return nil, err + } + + return ReadQueryResult(resp) +} + +// Deletes a typed JSON document from a specific index based on its id. +// Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html +func (es *Elasticsearch) Delete(index string, doc_type string, id string, params map[string]string) (*QueryResult, error) { + + path, err := MakePath(index, doc_type, id) + if err != nil { + return nil, err + } + + resp, err := es.Request("DELETE", path, params, nil) + if err != nil { + return nil, err + } + + return ReadQueryResult(resp) +} + +// A search request can be executed purely using a URI by providing request parameters. 
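+// For example (hypothetical index name, local node, query passed in the q
+// parameter):
+//
+//	es := NewElasticsearch([]string{"http://localhost:9200"}, "", "")
+//	result, err := es.SearchUri("myindex", "doc", map[string]string{"q": "user:test"})
+//	if err == nil {
+//		fmt.Println("hits:", result.Hits.Total)
+//	}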
+// Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html +func (es *Elasticsearch) SearchUri(index string, doc_type string, params map[string]string) (*SearchResults, error) { + + path, err := MakePath(index, doc_type, "_search") + if err != nil { + return nil, err + } + + resp, err := es.Request("GET", path, params, nil) + if err != nil { + return nil, err + } + return ReadSearchResult(resp) +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_mock_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_mock_test.go new file mode 100644 index 00000000000..cd289e73c83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_mock_test.go @@ -0,0 +1,182 @@ +package elasticsearch + +import ( + "fmt" + "os" + + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/elastic/libbeat/logp" +) + +func ElasticsearchMock(code int, body []byte) *httptest.Server { + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(code) + if body != nil { + w.Header().Set("Content-Type", "application/json") + w.Write(body) + } + })) + + return server +} + +func TestOneHostSuccessResp(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + expected_resp, _ := json.Marshal(QueryResult{Ok: true, Index: index, Type: "test", Id: "1", Version: 1, Created: true}) + + server := ElasticsearchMock(200, expected_resp) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Index(index, "test", "1", params, body) + if err != nil { + t.Errorf("Index() returns error: %s", err) + } + if !resp.Created { + t.Errorf("Index() fails: %s", resp) + } +} + +func TestOneHost500Resp(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + + server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Index(index, "test", "1", params, body) + if err == nil { + t.Errorf("Index() should return error.") + } + + if !strings.Contains(err.Error(), "500 Internal Server Error") { + t.Errorf("Should return <500 Internal Server Error> instead of %v", err) + } +} + +func TestOneHost503Resp(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + + server := ElasticsearchMock(503, []byte("Something wrong happened")) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Index(index, "test", "1", params, body) + if err == nil { + t.Errorf("Index() should 
return error.") + } + + if !strings.Contains(err.Error(), "retries. Errors") { + t.Errorf("Should return instead of %v", err) + } +} + +func TestMultipleHosts(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + expected_resp, _ := json.Marshal(QueryResult{Ok: true, Index: index, Type: "test", Id: "1", Version: 1, Created: true}) + + server1 := ElasticsearchMock(503, []byte("Something went wrong")) + server2 := ElasticsearchMock(200, expected_resp) + + logp.Debug("elasticsearch", "%s, %s", server1.URL, server2.URL) + es := NewElasticsearch([]string{server1.URL, server2.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Index(index, "test", "1", params, body) + if err != nil { + t.Errorf("Index() returns error: %s", err) + } + if !resp.Created { + t.Errorf("Index() fails: %s", resp) + } + +} + +func TestMultipleFailingHosts(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + server1 := ElasticsearchMock(503, []byte("Something went wrong")) + server2 := ElasticsearchMock(500, []byte("Something went wrong")) + + logp.Debug("elasticsearch", "%s, %s", server1.URL, server2.URL) + es := NewElasticsearch([]string{server1.URL, server2.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Index(index, "test", "1", params, body) + if err == nil { + t.Errorf("Index() should return error.") + } + + if !strings.Contains(err.Error(), "500 Internal Server Error") { + t.Errorf("Should return <500 Internal Server Error> instead of %v", err) + } + +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_test.go new file mode 100644 index 00000000000..93b4b8fd8e7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/api_test.go @@ -0,0 +1,128 @@ +package elasticsearch + +import ( + "fmt" + "os" + "testing" + + "github.com/elastic/libbeat/logp" +) + +func GetTestingElasticsearch() *Elasticsearch { + var es_url string + + // read the Elasticsearch port from the ES_PORT env variable + port := os.Getenv("ES_PORT") + if len(port) > 0 { + es_url = "http://localhost:" + port + } else { + // empty variable + es_url = "http://localhost:9200" + } + + return NewElasticsearch([]string{es_url}, "", "") +} + +func TestUrlEncode(t *testing.T) { + + params := map[string]string{ + "q": "agent:appserver1", + } + url := UrlEncode(params) + + if url != "q=agent%3Aappserver1" { + t.Errorf("Fail to encode params: %s", url) + } + + params = map[string]string{ + "wife": "sarah", + "husband": "joe", + } + + url = UrlEncode(params) + + if url != "husband=joe&wife=sarah" { + t.Errorf("Fail to encode params: %s", url) + } +} + +func TestMakePath(t *testing.T) { + path, err := MakePath("twitter", "tweet", "1") + if err != nil { + t.Errorf("Fail to create path: %s", err) + } + if path != "/twitter/tweet/1" { + t.Errorf("Wrong path created: %s", path) + } + + path, err = MakePath("twitter", "", "_refresh") + if 
err != nil { + t.Errorf("Fail to create path: %s", err) + } + if path != "/twitter/_refresh" { + t.Errorf("Wrong path created: %s", path) + } + + path, err = MakePath("", "", "_bulk") + if err != nil { + t.Errorf("Fail to create path: %s", err) + } + if path != "/_bulk" { + t.Errorf("Wrong path created: %s", path) + } + path, err = MakePath("twitter", "", "") + if err != nil { + t.Errorf("Fail to create path: %s", err) + } + if path != "/twitter" { + t.Errorf("Wrong path created: %s", path) + } + +} + +func TestIndex(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + if testing.Short() { + t.Skip("Skipping in short mode, because it requires Elasticsearch") + } + + es := GetTestingElasticsearch() + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + body := map[string]interface{}{ + "user": "test", + "post_date": "2009-11-15T14:12:12", + "message": "trying out", + } + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Index(index, "test", "1", params, body) + if err != nil { + t.Errorf("Index() returns error: %s", err) + } + if !resp.Created { + t.Errorf("Index() fails: %s", resp) + } + + params = map[string]string{ + "q": "user:test", + } + result, err := es.SearchUri(index, "test", params) + if err != nil { + t.Errorf("SearchUri() returns an error: %s", err) + } + if result.Hits.Total != 1 { + t.Errorf("Wrong number of search results: %d", result.Hits.Total) + } + + resp, err = es.Delete(index, "test", "1", nil) + if err != nil { + t.Errorf("Delete() returns error: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi.go new file mode 100644 index 00000000000..52293c32cc6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi.go @@ -0,0 +1,88 @@ +package elasticsearch + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/elastic/libbeat/common" + "github.com/elastic/libbeat/logp" +) + +type EventMsg struct { + Ts time.Time + Event common.MapStr +} + +// Create a HTTP request containing a bunch of operations and send them to Elasticsearch. +// The request is retransmitted up to max_retries before returning an error. +func (es *Elasticsearch) BulkRequest(method string, path string, + params map[string]string, body chan interface{}) ([]byte, error) { + + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + for obj := range body { + enc.Encode(obj) + } + + if buf.Len() == 0 { + logp.Debug("elasticsearch", "Empty channel. Wait for more data.") + return nil, nil + } + + var errors []error + + for attempt := 0; attempt < es.MaxRetries; attempt++ { + + conn := es.connectionPool.GetConnection() + logp.Debug("elasticsearch", "Use connection %s", conn.Url) + + url := conn.Url + path + if len(params) > 0 { + url = url + "?" 
+ UrlEncode(params) + } + logp.Debug("elasticsearch", "Sending bulk request to %s", url) + + req, err := http.NewRequest(method, url, &buf) + if err != nil { + return nil, fmt.Errorf("NewRequest fails: %s", err) + } + + resp, retry, err := es.PerformRequest(conn, req) + if retry == true { + // retry + if err != nil { + errors = append(errors, err) + } + continue + } + if err != nil { + return nil, fmt.Errorf("PerformRequest fails: %s", err) + } + return resp, nil + } + + logp.Warn("Request fails to be send after %d retries", es.MaxRetries) + + return nil, fmt.Errorf("Request fails after %d retries. Errors: %v", es.MaxRetries, errors) +} + +// Perform many index/delete operations in a single API call. +// Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +func (es *Elasticsearch) Bulk(index string, doc_type string, + params map[string]string, body chan interface{}) (*QueryResult, error) { + + path, err := MakePath(index, doc_type, "_bulk") + if err != nil { + return nil, err + } + + resp, err := es.BulkRequest("POST", path, params, body) + if err != nil { + return nil, err + } + + return ReadQueryResult(resp) +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_mock_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_mock_test.go new file mode 100644 index 00000000000..b63b6863157 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_mock_test.go @@ -0,0 +1,187 @@ +package elasticsearch + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + "testing" + + "github.com/elastic/libbeat/logp" +) + +func TestOneHostSuccessResp_Bulk(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + expected_resp, _ := json.Marshal(QueryResult{Ok: true, Index: index, Type: "type1", Id: "1", Version: 1, Created: true}) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + server := ElasticsearchMock(200, expected_resp) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Bulk(index, "type1", params, body) + if err != nil { + t.Errorf("Bulk() returns error: %s", err) + } + if !resp.Created { + t.Errorf("Bulk() fails: %s", resp) + } +} + +func TestOneHost500Resp_Bulk(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Bulk(index, "type1", params, body) + if err == nil { + t.Errorf("Bulk() should return 
error.") + } + + if !strings.Contains(err.Error(), "500 Internal Server Error") { + t.Errorf("Should return <500 Internal Server Error> instead of %v", err) + } +} + +func TestOneHost503Resp_Bulk(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + server := ElasticsearchMock(503, []byte("Something wrong happened")) + + es := NewElasticsearch([]string{server.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Bulk(index, "type1", params, body) + if err == nil { + t.Errorf("Bulk() should return error.") + } + + if !strings.Contains(err.Error(), "retries. Errors") { + t.Errorf("Should return instead of %v", err) + } +} + +func TestMultipleHost_Bulk(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + expected_resp, _ := json.Marshal(QueryResult{Ok: true, Index: index, Type: "type1", Id: "1", Version: 1, Created: true}) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + server1 := ElasticsearchMock(503, []byte("Somehting went wrong")) + server2 := ElasticsearchMock(200, expected_resp) + + es := NewElasticsearch([]string{server1.URL, server2.URL}, "", "") + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Bulk(index, "type1", params, body) + if err != nil { + t.Errorf("Bulk() returns error: %s", err) + } + if !resp.Created { + t.Errorf("Bulk() fails: %s", resp) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_test.go new file mode 100644 index 00000000000..73df90cbbb5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/bulkapi_test.go @@ -0,0 +1,183 @@ +package elasticsearch + +import ( + "fmt" + "os" + "testing" + + "github.com/elastic/libbeat/logp" +) + +func TestBulk(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + if testing.Short() { + t.Skip("Skipping in short mode, because it requires Elasticsearch") + } + es := GetTestingElasticsearch() + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + params := map[string]string{ + "refresh": "true", + } + _, err := es.Bulk(index, "type1", params, body) + if err != nil { + t.Errorf("Bulk() returned error: %s", err) + } + + params = map[string]string{ + "q": "field1:value1", + } + 
result, err := es.SearchUri(index, "type1", params) + if err != nil { + t.Errorf("SearchUri() returns an error: %s", err) + } + if result.Hits.Total != 1 { + t.Errorf("Wrong number of search results: %d", result.Hits.Total) + } + + _, err = es.Delete(index, "", "", nil) + if err != nil { + t.Errorf("Delete() returns error: %s", err) + } +} + +func TestEmptyBulk(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + if testing.Short() { + t.Skip("Skipping in short mode, because it requires Elasticsearch") + } + es := GetTestingElasticsearch() + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + body := make(chan interface{}, 10) + close(body) + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Bulk(index, "type1", params, body) + if err != nil { + t.Errorf("Bulk() returned error: %s", err) + } + if resp != nil { + t.Errorf("Unexpected response: %s", resp) + } +} + +func TestBulkMoreOperations(t *testing.T) { + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + if testing.Short() { + t.Skip("Skipping in short mode, because it requires Elasticsearch") + } + es := GetTestingElasticsearch() + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + ops := []map[string]interface{}{ + map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "1", + }, + }, + map[string]interface{}{ + "field1": "value1", + }, + map[string]interface{}{ + "delete": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "2", + }, + }, + map[string]interface{}{ + "create": map[string]interface{}{ + "_index": index, + "_type": "type1", + "_id": "3", + }, + }, + map[string]interface{}{ + "field1": "value3", + }, + map[string]interface{}{ + "update": map[string]interface{}{ + "_id": "1", + "_index": index, + "_type": "type1", + }, + }, + map[string]interface{}{ + "doc": map[string]interface{}{ + "field2": "value2", + }, + }, + } + + body := make(chan interface{}, 10) + for _, op := range ops { + body <- op + } + close(body) + + params := map[string]string{ + "refresh": "true", + } + resp, err := es.Bulk(index, "type1", params, body) + if err != nil { + t.Errorf("Bulk() returned error: %s [%s]", err, resp) + return + } + + params = map[string]string{ + "q": "field1:value3", + } + result, err := es.SearchUri(index, "type1", params) + if err != nil { + t.Errorf("SearchUri() returns an error: %s", err) + } + if result.Hits.Total != 1 { + t.Errorf("Wrong number of search results: %d", result.Hits.Total) + } + + params = map[string]string{ + "q": "field2:value2", + } + result, err = es.SearchUri(index, "type1", params) + if err != nil { + t.Errorf("SearchUri() returns an error: %s", err) + } + if result.Hits.Total != 1 { + t.Errorf("Wrong number of search results: %d", result.Hits.Total) + } + + _, err = es.Delete(index, "", "", nil) + if err != nil { + t.Errorf("Delete() returns error: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool.go new file mode 100644 index 00000000000..23dc9f9aabc --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool.go @@ -0,0 +1,114 @@ +package elasticsearch + +import ( + "math" + "math/rand" + "time" + + "github.com/elastic/libbeat/logp" +) + +type Connection 
struct { + Url string + Username string + Password string + + dead bool + dead_count int + timer *time.Timer +} + +const ( + default_dead_timeout = 60 //seconds +) + +type ConnectionPool struct { + Connections []*Connection + rr int //round robin + + // options + Dead_timeout time.Duration +} + +func (pool *ConnectionPool) SetConnections(urls []string, username string, password string) error { + + var connections []*Connection + + for _, url := range urls { + conn := Connection{ + Url: url, + Username: username, + Password: password, + } + // set default settings + conn.dead_count = 0 + connections = append(connections, &conn) + } + pool.Connections = connections + pool.rr = -1 + pool.Dead_timeout = default_dead_timeout + return nil +} + +func (pool *ConnectionPool) SetDeadTimeout(timeout int) { + pool.Dead_timeout = time.Duration(timeout) +} + +func (pool *ConnectionPool) SelectRoundRobin() *Connection { + + for count := 0; count < len(pool.Connections); count++ { + + pool.rr += 1 + pool.rr = pool.rr % len(pool.Connections) + conn := pool.Connections[pool.rr] + if conn.dead == false { + return conn + } + } + + // no connection is alive, return a random connection + pool.rr = rand.Intn(len(pool.Connections)) + return pool.Connections[pool.rr] +} + +func (pool *ConnectionPool) GetConnection() *Connection { + + if len(pool.Connections) > 1 { + return pool.SelectRoundRobin() + } + // only one connection, no need to select one connection + return pool.Connections[0] +} + +// If a connection fails, it will be marked as dead and put on timeout. +// timeout = default_timeout * 2 ** (fail_count - 1) +// When the timeout is over, the connection will be resurrected and +// returned to the live pool +func (pool *ConnectionPool) MarkDead(conn *Connection) error { + + if !conn.dead { + logp.Debug("elasticsearch", "Mark dead %s", conn.Url) + conn.dead = true + conn.dead_count = conn.dead_count + 1 + timeout := pool.Dead_timeout * time.Duration(math.Pow(2, float64(conn.dead_count)-1)) + conn.timer = time.AfterFunc(timeout*time.Second, func() { + // timeout expires + conn.dead = false + logp.Debug("elasticsearch", "Timeout expired. 
Mark it as alive: %s", conn.Url) + }) + } + + return nil +} + +// A connection that has been previously marked as dead and succeeds will be marked +// as live and the dead_count is set to zero +func (pool *ConnectionPool) MarkLive(conn *Connection) error { + if conn.dead { + logp.Debug("elasticsearch", "Mark live %s", conn.Url) + conn.dead = false + conn.dead_count = 0 + conn.timer.Stop() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool_test.go new file mode 100644 index 00000000000..6fca8d989df --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/connection_pool_test.go @@ -0,0 +1,134 @@ +package elasticsearch + +import ( + "testing" + "time" + + "github.com/elastic/libbeat/logp" +) + +func TestRoundRobin(t *testing.T) { + + var pool ConnectionPool + + urls := []string{"localhost:9200", "localhost:9201"} + + err := pool.SetConnections(urls, "test", "secret") + + if err != nil { + t.Errorf("Fail to set the connections: %s", err) + } + + conn := pool.GetConnection() + + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } +} + +func TestMarkDead(t *testing.T) { + + var pool ConnectionPool + + urls := []string{"localhost:9200", "localhost:9201"} + + err := pool.SetConnections(urls, "test", "secret") + + if err != nil { + t.Errorf("Fail to set the connections: %s", err) + } + + conn := pool.GetConnection() + + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + pool.MarkDead(conn) + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + pool.MarkDead(conn) + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" && conn.Url != "localhost:9200" { + t.Errorf("No expected connection returned") + } + +} + +func TestDeadTimeout(t *testing.T) { + + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch"}) + } + + var pool ConnectionPool + + urls := []string{"localhost:9200", "localhost:9201"} + + err := pool.SetConnections(urls, "test", "secret") + if err != nil { + t.Errorf("Fail to set the connections: %s", err) + } + pool.SetDeadTimeout(10) + + conn := pool.GetConnection() + + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + pool.MarkDead(conn) + time.Sleep(10 * time.Second) + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + + conn = pool.GetConnection() + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } +} + +func TestMarkLive(t *testing.T) { + + var pool ConnectionPool + + urls := []string{"localhost:9200", "localhost:9201"} + + err := pool.SetConnections(urls, "test", "secret") + + if err != nil { + t.Errorf("Fail to set the connections: %s", err) + } + + conn := pool.GetConnection() + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + pool.MarkDead(conn) + pool.MarkLive(conn) + + conn = pool.GetConnection() + if conn.Url != "localhost:9201" 
{ + t.Errorf("Wrong connection returned: %s", conn.Url) + } + conn = pool.GetConnection() + if conn.Url != "localhost:9200" { + t.Errorf("Wrong connection returned: %s", conn.Url) + } + +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output.go new file mode 100644 index 00000000000..a0e059e75f4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output.go @@ -0,0 +1,271 @@ +package elasticsearch + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/elastic/libbeat/common" + "github.com/elastic/libbeat/logp" + "github.com/elastic/libbeat/outputs" +) + +type ElasticsearchOutput struct { + Index string + TopologyExpire int + Conn *Elasticsearch + FlushInterval time.Duration + BulkMaxSize int + + TopologyMap map[string]string + sendingQueue chan EventMsg + + ttlEnabled bool +} + +type PublishedTopology struct { + Name string + IPs string +} + +// Initialize Elasticsearch as output +func (out *ElasticsearchOutput) Init(config outputs.MothershipConfig, topology_expire int) error { + + if len(config.Protocol) == 0 { + config.Protocol = "http" + } + + var urls []string + + if len(config.Hosts) > 0 { + // use hosts setting + for _, host := range config.Hosts { + url := fmt.Sprintf("%s://%s%s", config.Protocol, host, config.Path) + urls = append(urls, url) + } + } else { + // use host and port settings + url := fmt.Sprintf("%s://%s:%d%s", config.Protocol, config.Host, config.Port, config.Path) + urls = append(urls, url) + } + + es := NewElasticsearch(urls, config.Username, config.Password) + out.Conn = es + + if config.Index != "" { + out.Index = config.Index + } else { + out.Index = "packetbeat" + } + + out.TopologyExpire = 15000 + if topology_expire != 0 { + out.TopologyExpire = topology_expire /*sec*/ * 1000 // millisec + } + + out.FlushInterval = 1000 * time.Millisecond + if config.Flush_interval != nil { + out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond + } + out.BulkMaxSize = 10000 + if config.Bulk_size != nil { + out.BulkMaxSize = *config.Bulk_size + } + + if config.Max_retries != nil { + out.Conn.SetMaxRetries(*config.Max_retries) + } + + logp.Info("[ElasticsearchOutput] Using Elasticsearch %s", urls) + logp.Info("[ElasticsearchOutput] Using index pattern [%s-]YYYY.MM.DD", out.Index) + logp.Info("[ElasticsearchOutput] Topology expires after %ds", out.TopologyExpire/1000) + if out.FlushInterval > 0 { + logp.Info("[ElasticsearchOutput] Insert events in batches. Flush interval is %s. Bulk size is %d.", out.FlushInterval, out.BulkMaxSize) + } else { + logp.Info("[ElasticsearchOutput] Insert events one by one. 
This might affect the performance of the shipper.")
+	}
+
+	if config.Save_topology {
+		err := out.EnableTTL()
+		if err != nil {
+			logp.Err("Fail to set _ttl mapping: %s", err)
+			// keep trying in the background
+			go func() {
+				for {
+					err := out.EnableTTL()
+					if err == nil {
+						break
+					}
+					logp.Err("Fail to set _ttl mapping: %s", err)
+					time.Sleep(5 * time.Second)
+				}
+			}()
+		}
+	}
+
+	out.sendingQueue = make(chan EventMsg, 1000)
+	go out.SendMessagesGoroutine()
+
+	return nil
+}
+
+// Enable using ttl as a parameter in the server-ip doc type
+func (out *ElasticsearchOutput) EnableTTL() error {
+
+	// make sure the .packetbeat-topology index exists
+	out.Conn.CreateIndex(".packetbeat-topology", nil)
+
+	setting := map[string]interface{}{
+		"server-ip": map[string]interface{}{
+			"_ttl": map[string]string{"enabled": "true", "default": "15s"},
+		},
+	}
+
+	_, err := out.Conn.Index(".packetbeat-topology", "server-ip", "_mapping", nil, setting)
+	if err != nil {
+		return err
+	}
+
+	out.ttlEnabled = true
+
+	return nil
+}
+
+// Get the name of a shipper by its IP address from the local topology map
+func (out *ElasticsearchOutput) GetNameByIP(ip string) string {
+	name, exists := out.TopologyMap[ip]
+	if !exists {
+		return ""
+	}
+	return name
+}
+
+// Insert a list of events in the bulkChannel
+func (out *ElasticsearchOutput) InsertBulkMessage(bulkChannel chan interface{}) {
+	close(bulkChannel)
+	go func(channel chan interface{}) {
+		_, err := out.Conn.Bulk("", "", nil, channel)
+		if err != nil {
+			logp.Err("Fail to perform many index operations in a single API call: %s", err)
+		}
+	}(bulkChannel)
+}
+
+// Goroutine that sends one or multiple events to Elasticsearch.
+// If the flush_interval > 0, then the events are sent in batches. Otherwise, one by one.
+func (out *ElasticsearchOutput) SendMessagesGoroutine() {
+	flushChannel := make(<-chan time.Time)
+
+	if out.FlushInterval > 0 {
+		flushTicker := time.NewTicker(out.FlushInterval)
+		flushChannel = flushTicker.C
+	}
+
+	bulkChannel := make(chan interface{}, out.BulkMaxSize)
+
+	for {
+		select {
+		case msg := <-out.sendingQueue:
+			index := fmt.Sprintf("%s-%d.%02d.%02d", out.Index, msg.Ts.Year(), msg.Ts.Month(), msg.Ts.Day())
+			if out.FlushInterval > 0 {
+				// insert the events in batches
+				if len(bulkChannel)+2 > out.BulkMaxSize {
+					logp.Debug("output_elasticsearch", "Channel size reached. 
Calling bulk") + out.InsertBulkMessage(bulkChannel) + bulkChannel = make(chan interface{}, out.BulkMaxSize) + } + bulkChannel <- map[string]interface{}{ + "index": map[string]interface{}{ + "_index": index, + "_type": msg.Event["type"].(string), + }, + } + bulkChannel <- msg.Event + } else { + // insert the events one by one + _, err := out.Conn.Index(index, msg.Event["type"].(string), "", nil, msg.Event) + if err != nil { + logp.Err("Fail to insert a single event: %s", err) + } + } + case _ = <-flushChannel: + out.InsertBulkMessage(bulkChannel) + bulkChannel = make(chan interface{}, out.BulkMaxSize) + } + } +} + +// Each shipper publishes a list of IPs together with its name to Elasticsearch +func (out *ElasticsearchOutput) PublishIPs(name string, localAddrs []string) error { + if !out.ttlEnabled { + logp.Debug("output_elasticsearch", "Not publishing IPs because TTL was not yet confirmed to be enabled") + return nil + } + + logp.Debug("output_elasticsearch", "Publish IPs %s with expiration time %d", localAddrs, out.TopologyExpire) + params := map[string]string{ + "ttl": fmt.Sprintf("%dms", out.TopologyExpire), + "refresh": "true", + } + _, err := out.Conn.Index( + ".packetbeat-topology", /*index*/ + "server-ip", /*type*/ + name, /* id */ + params, /* parameters */ + PublishedTopology{name, strings.Join(localAddrs, ",")} /* body */) + + if err != nil { + logp.Err("Fail to publish IP addresses: %s", err) + return err + } + + out.UpdateLocalTopologyMap() + + return nil +} + +// Update the local topology map +func (out *ElasticsearchOutput) UpdateLocalTopologyMap() { + + // get all shippers IPs from Elasticsearch + TopologyMapTmp := make(map[string]string) + + res, err := out.Conn.SearchUri(".packetbeat-topology", "server-ip", nil) + if err == nil { + for _, obj := range res.Hits.Hits { + var result QueryResult + err = json.Unmarshal(obj, &result) + if err != nil { + return + } + + var pub PublishedTopology + err = json.Unmarshal(result.Source, &pub) + if err != nil { + logp.Err("json.Unmarshal fails with: %s", err) + } + // add mapping + ipaddrs := strings.Split(pub.IPs, ",") + for _, addr := range ipaddrs { + TopologyMapTmp[addr] = pub.Name + } + } + } else { + logp.Err("Getting topology map fails with: %s", err) + } + + // update topology map + out.TopologyMap = TopologyMapTmp + + logp.Debug("output_elasticsearch", "Topology map %s", out.TopologyMap) +} + +// Publish an event by adding it to the queue of events. 
+func (out *ElasticsearchOutput) PublishEvent(ts time.Time, event common.MapStr) error { + + out.sendingQueue <- EventMsg{Ts: ts, Event: event} + + logp.Debug("output_elasticsearch", "Publish event: %s", event) + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output_test.go new file mode 100644 index 00000000000..d2c89b877d3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/elasticsearch/output_test.go @@ -0,0 +1,347 @@ +package elasticsearch + +import ( + "fmt" + "os" + "strconv" + "testing" + "time" + + "github.com/elastic/libbeat/common" + "github.com/elastic/libbeat/logp" + "github.com/elastic/libbeat/outputs" +) + +const elasticsearchAddr = "localhost" +const elasticsearchPort = 9200 + +func createElasticsearchConnection(flush_interval int, bulk_size int) ElasticsearchOutput { + + index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) + + var es_port int + var err error + + // read the Elasticsearch port from the ES_PORT env variable + port := os.Getenv("ES_PORT") + if len(port) > 0 { + es_port, err = strconv.Atoi(port) + if err != nil { + // error occurred, use the default + es_port = elasticsearchPort + } + } else { + // empty variable + es_port = elasticsearchPort + } + + var elasticsearchOutput ElasticsearchOutput + elasticsearchOutput.Init(outputs.MothershipConfig{ + Enabled: true, + Save_topology: true, + Host: elasticsearchAddr, + Port: es_port, + Username: "", + Password: "", + Path: "", + Index: index, + Protocol: "", + Flush_interval: &flush_interval, + Bulk_size: &bulk_size, + }, 10) + + return elasticsearchOutput +} + +func TestTopologyInES(t *testing.T) { + if testing.Short() { + t.Skip("Skipping topology tests in short mode, because they require Elasticsearch") + } + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"topology", "output_elasticsearch"}) + } + + elasticsearchOutput1 := createElasticsearchConnection(0, 0) + elasticsearchOutput2 := createElasticsearchConnection(0, 0) + elasticsearchOutput3 := createElasticsearchConnection(0, 0) + + elasticsearchOutput1.PublishIPs("proxy1", []string{"10.1.0.4"}) + elasticsearchOutput2.PublishIPs("proxy2", []string{"10.1.0.9", + "fe80::4e8d:79ff:fef2:de6a"}) + elasticsearchOutput3.PublishIPs("proxy3", []string{"10.1.0.10"}) + + name2 := elasticsearchOutput3.GetNameByIP("10.1.0.9") + if name2 != "proxy2" { + t.Errorf("Failed to update proxy2 in topology: name=%s", name2) + } + + elasticsearchOutput1.PublishIPs("proxy1", []string{"10.1.0.4"}) + elasticsearchOutput2.PublishIPs("proxy2", []string{"10.1.0.9"}) + elasticsearchOutput3.PublishIPs("proxy3", []string{"192.168.1.2"}) + + name3 := elasticsearchOutput3.GetNameByIP("192.168.1.2") + if name3 != "proxy3" { + t.Errorf("Failed to add a new IP") + } + + name3 = elasticsearchOutput3.GetNameByIP("10.1.0.10") + if name3 != "" { + t.Errorf("Failed to delete old IP of proxy3: %s", name3) + } + + name2 = elasticsearchOutput3.GetNameByIP("fe80::4e8d:79ff:fef2:de6a") + if name2 != "" { + t.Errorf("Failed to delete old IP of proxy2: %s", name2) + } +} + +func TestOneEvent(t *testing.T) { + if testing.Short() { + t.Skip("Skipping events publish in short mode, because they require Elasticsearch") + } + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"elasticsearch", "output_elasticsearch"}) + } + + ts := time.Now() + + elasticsearchOutput := 
createElasticsearchConnection(0, 0)
+
+	event := common.MapStr{}
+	event["type"] = "redis"
+	event["status"] = "OK"
+	event["responsetime"] = 34
+	event["dst_ip"] = "192.168.21.1"
+	event["dst_port"] = 6379
+	event["src_ip"] = "192.168.22.2"
+	event["src_port"] = 6378
+	event["shipper"] = "appserver1"
+	r := common.MapStr{}
+	r["request"] = "MGET key1"
+	r["response"] = "value1"
+
+	index := fmt.Sprintf("%s-%d.%02d.%02d", elasticsearchOutput.Index, ts.Year(), ts.Month(), ts.Day())
+	logp.Debug("output_elasticsearch", "index = %s", index)
+	elasticsearchOutput.Conn.CreateIndex(index, common.MapStr{
+		"settings": common.MapStr{
+			"number_of_shards":   1,
+			"number_of_replicas": 0,
+		},
+	})
+
+	err := elasticsearchOutput.PublishEvent(ts, event)
+	if err != nil {
+		t.Errorf("Failed to publish the event: %s", err)
+	}
+
+	// give control to the other goroutine, otherwise the refresh happens
+	// before the indexing. We should find a better solution for this.
+	time.Sleep(200 * time.Millisecond)
+
+	_, err = elasticsearchOutput.Conn.Refresh(index)
+	if err != nil {
+		t.Errorf("Failed to refresh: %s", err)
+	}
+
+	defer func() {
+		_, err = elasticsearchOutput.Conn.Delete(index, "", "", nil)
+		if err != nil {
+			t.Errorf("Failed to delete index: %s", err)
+		}
+	}()
+
+	params := map[string]string{
+		"q": "shipper:appserver1",
+	}
+	resp, err := elasticsearchOutput.Conn.SearchUri(index, "", params)
+
+	if err != nil {
+		t.Errorf("Failed to query elasticsearch for index(%s): %s", index, err)
+		return
+	}
+	logp.Debug("output_elasticsearch", "resp = %s", resp)
+	if resp.Hits.Total != 1 {
+		t.Errorf("Wrong number of results: %d", resp.Hits.Total)
+	}
+
+}
+
+func TestEvents(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping events publish in short mode, because they require Elasticsearch")
+	}
+	if testing.Verbose() {
+		logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"topology", "output_elasticsearch"})
+	}
+
+	ts := time.Now()
+
+	elasticsearchOutput := createElasticsearchConnection(0, 0)
+
+	event := common.MapStr{}
+	event["type"] = "redis"
+	event["status"] = "OK"
+	event["responsetime"] = 34
+	event["dst_ip"] = "192.168.21.1"
+	event["dst_port"] = 6379
+	event["src_ip"] = "192.168.22.2"
+	event["src_port"] = 6378
+	event["shipper"] = "appserver1"
+	r := common.MapStr{}
+	r["request"] = "MGET key1"
+	r["response"] = "value1"
+	event["redis"] = r
+
+	index := fmt.Sprintf("%s-%d.%02d.%02d", elasticsearchOutput.Index, ts.Year(), ts.Month(), ts.Day())
+	elasticsearchOutput.Conn.CreateIndex(index, common.MapStr{
+		"settings": common.MapStr{
+			"number_of_shards":   1,
+			"number_of_replicas": 0,
+		},
+	})
+
+	err := elasticsearchOutput.PublishEvent(ts, event)
+	if err != nil {
+		t.Errorf("Failed to publish the event: %s", err)
+	}
+
+	r = common.MapStr{}
+	r["request"] = "MSET key1 value1"
+	r["response"] = 0
+	event["redis"] = r
+
+	err = elasticsearchOutput.PublishEvent(ts, event)
+	if err != nil {
+		t.Errorf("Failed to publish the event: %s", err)
+	}
+
+	// give control to the other goroutine, otherwise the refresh happens
+	// before the indexing. We should find a better solution for this.
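+	// (The explicit Refresh after this sleep forces the newly indexed
+	// documents to become searchable right away, instead of waiting for
+	// the index refresh interval.)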
+ time.Sleep(200 * time.Millisecond) + + elasticsearchOutput.Conn.Refresh(index) + + params := map[string]string{ + "q": "shipper:appserver1", + } + + defer func() { + _, err = elasticsearchOutput.Conn.Delete(index, "", "", nil) + if err != nil { + t.Errorf("Failed to delete index: %s", err) + } + }() + + resp, err := elasticsearchOutput.Conn.SearchUri(index, "", params) + + if err != nil { + t.Errorf("Failed to query elasticsearch: %s", err) + } + if resp.Hits.Total != 2 { + t.Errorf("Wrong number of results: %d", resp.Hits.Total) + } +} + +func test_bulk_with_params(t *testing.T, elasticsearchOutput ElasticsearchOutput) { + ts := time.Now() + index := fmt.Sprintf("%s-%d.%02d.%02d", elasticsearchOutput.Index, ts.Year(), ts.Month(), ts.Day()) + + elasticsearchOutput.Conn.CreateIndex(index, common.MapStr{ + "settings": common.MapStr{ + "number_of_shards": 1, + "number_of_replicas": 0, + }, + }) + + for i := 0; i < 10; i++ { + + event := common.MapStr{} + event["type"] = "redis" + event["status"] = "OK" + event["responsetime"] = 34 + event["dst_ip"] = "192.168.21.1" + event["dst_port"] = 6379 + event["src_ip"] = "192.168.22.2" + event["src_port"] = 6378 + event["shipper"] = "appserver" + strconv.Itoa(i) + r := common.MapStr{} + r["request"] = "MGET key" + strconv.Itoa(i) + r["response"] = "value" + strconv.Itoa(i) + event["redis"] = r + + err := elasticsearchOutput.PublishEvent(ts, event) + if err != nil { + t.Errorf("Failed to publish the event: %s", err) + } + + } + + // give control to the other goroutine, otherwise the refresh happens + // before the index. We should find a better solution for this. + time.Sleep(200 * time.Millisecond) + + elasticsearchOutput.Conn.Refresh(index) + + params := map[string]string{ + "q": "type:redis", + } + + defer func() { + _, err := elasticsearchOutput.Conn.Delete(index, "", "", nil) + if err != nil { + t.Errorf("Failed to delete index: %s", err) + } + }() + + resp, err := elasticsearchOutput.Conn.SearchUri(index, "", params) + + if err != nil { + t.Errorf("Failed to query elasticsearch: %s", err) + return + } + if resp.Hits.Total != 10 { + t.Errorf("Wrong number of results: %d", resp.Hits.Total) + } +} + +func TestBulkEvents(t *testing.T) { + if testing.Short() { + t.Skip("Skipping events publish in short mode, because they require Elasticsearch") + } + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"topology", "output_elasticsearch", "elasticsearch"}) + } + + elasticsearchOutput := createElasticsearchConnection(50, 2) + test_bulk_with_params(t, elasticsearchOutput) + + elasticsearchOutput = createElasticsearchConnection(50, 1000) + test_bulk_with_params(t, elasticsearchOutput) + + elasticsearchOutput = createElasticsearchConnection(50, 5) + test_bulk_with_params(t, elasticsearchOutput) +} + +func TestEnableTTL(t *testing.T) { + if testing.Short() { + t.Skip("Skipping events publish in short mode, because they require Elasticsearch") + } + if testing.Verbose() { + logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"topology", "output_elasticsearch", "elasticsearch"}) + } + + elasticsearchOutput := createElasticsearchConnection(0, 0) + elasticsearchOutput.Conn.Delete(".packetbeat-topology", "", "", nil) + + err := elasticsearchOutput.EnableTTL() + if err != nil { + t.Errorf("Fail to enable TTL: %s", err) + } + + // should succeed also when index already exists + err = elasticsearchOutput.EnableTTL() + if err != nil { + t.Errorf("Fail to enable TTL: %s", err) + } + +} diff --git 
a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/fileout/file.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/fileout/file.go new file mode 100644 index 00000000000..f7fd424ec56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/fileout/file.go @@ -0,0 +1,72 @@ +package fileout + +import ( + "encoding/json" + "time" + + "github.com/elastic/libbeat/common" + "github.com/elastic/libbeat/logp" + "github.com/elastic/libbeat/outputs" +) + +type FileOutput struct { + rotator logp.FileRotator +} + +func (out *FileOutput) Init(config outputs.MothershipConfig, topology_expire int) error { + out.rotator.Path = config.Path + out.rotator.Name = config.Filename + if out.rotator.Name == "" { + out.rotator.Name = "packetbeat" + } + + rotateeverybytes := uint64(config.Rotate_every_kb) * 1024 + if rotateeverybytes == 0 { + rotateeverybytes = 10 * 1024 * 1024 + } + out.rotator.RotateEveryBytes = &rotateeverybytes + + keepfiles := config.Number_of_files + if keepfiles == 0 { + keepfiles = 7 + } + out.rotator.KeepFiles = &keepfiles + + err := out.rotator.CreateDirectory() + if err != nil { + return err + } + + err = out.rotator.CheckIfConfigSane() + if err != nil { + return err + } + + return nil +} + +func (out *FileOutput) PublishIPs(name string, localAddrs []string) error { + // not supported by this output type + return nil +} + +func (out *FileOutput) GetNameByIP(ip string) string { + // not supported by this output type + return "" +} + +func (out *FileOutput) PublishEvent(ts time.Time, event common.MapStr) error { + + json_event, err := json.Marshal(event) + if err != nil { + logp.Err("Fail to convert the event to JSON: %s", err) + return err + } + + err = out.rotator.WriteLine(json_event) + if err != nil { + return err + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/outputs.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/outputs.go new file mode 100644 index 00000000000..587a34d4fab --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/outputs.go @@ -0,0 +1,72 @@ +package outputs + +import ( + "time" + + "github.com/elastic/libbeat/common" +) + +type MothershipConfig struct { + Enabled bool + Save_topology bool + Host string + Port int + Hosts []string + Protocol string + Username string + Password string + Index string + Path string + Db int + Db_topology int + Timeout int + Reconnect_interval int + Filename string + Rotate_every_kb int + Number_of_files int + DataType string + Flush_interval *int + Bulk_size *int + Max_retries *int +} + +// Functions to be exported by a output plugin +type OutputInterface interface { + // Initialize the output plugin + Init(config MothershipConfig, topology_expire int) error + + // Register the agent name and its IPs to the topology map + PublishIPs(name string, localAddrs []string) error + + // Get the agent name with a specific IP from the topology map + GetNameByIP(ip string) string + + // Publish event + PublishEvent(ts time.Time, event common.MapStr) error +} + +// Output identifier +type OutputPlugin uint16 + +// Output constants +const ( + UnknownOutput OutputPlugin = iota + RedisOutput + ElasticsearchOutput + FileOutput +) + +// Output names +var OutputNames = []string{ + "unknown", + "redis", + "elasticsearch", + "file", +} + +func (o OutputPlugin) String() string { + if int(o) >= len(OutputNames) { + return "impossible" + } + return OutputNames[o] +} diff --git 
a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis.go
new file mode 100644
index 00000000000..c69f0600217
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis.go
@@ -0,0 +1,311 @@
+package redis
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/elastic/libbeat/common"
+	"github.com/elastic/libbeat/logp"
+	"github.com/elastic/libbeat/outputs"
+
+	"github.com/garyburd/redigo/redis"
+)
+
+type RedisDataType uint16
+
+const (
+	RedisListType RedisDataType = iota
+	RedisChannelType
+)
+
+type RedisOutput struct {
+	Index string
+	Conn  redis.Conn
+
+	TopologyExpire     time.Duration
+	ReconnectInterval  time.Duration
+	Hostname           string
+	Password           string
+	Db                 int
+	DbTopology         int
+	Timeout            time.Duration
+	DataType           RedisDataType
+	FlushInterval      time.Duration
+	flush_immediatelly bool
+
+	TopologyMap  map[string]string
+	sendingQueue chan RedisQueueMsg
+	connected    bool
+}
+
+type RedisQueueMsg struct {
+	index string
+	msg   string
+}
+
+func (out *RedisOutput) Init(config outputs.MothershipConfig, topology_expire int) error {
+
+	out.Hostname = fmt.Sprintf("%s:%d", config.Host, config.Port)
+
+	if config.Password != "" {
+		out.Password = config.Password
+	}
+
+	if config.Db != 0 {
+		out.Db = config.Db
+	}
+
+	out.DbTopology = 1
+	if config.Db_topology != 0 {
+		out.DbTopology = config.Db_topology
+	}
+
+	out.Timeout = 5 * time.Second
+	if config.Timeout != 0 {
+		out.Timeout = time.Duration(config.Timeout) * time.Second
+	}
+
+	if config.Index != "" {
+		out.Index = config.Index
+	} else {
+		out.Index = "packetbeat"
+	}
+
+	out.FlushInterval = 1000 * time.Millisecond
+	if config.Flush_interval != nil {
+		if *config.Flush_interval < 0 {
+			out.flush_immediatelly = true
+			logp.Warn("Flushing to REDIS on each push, performance might be affected")
+		} else {
+			out.FlushInterval = time.Duration(*config.Flush_interval) * time.Millisecond
+		}
+	}
+
+	out.ReconnectInterval = time.Duration(1) * time.Second
+	if config.Reconnect_interval != 0 {
+		out.ReconnectInterval = time.Duration(config.Reconnect_interval) * time.Second
+	}
+
+	exp_sec := 15
+	if topology_expire != 0 {
+		exp_sec = topology_expire
+	}
+	out.TopologyExpire = time.Duration(exp_sec) * time.Second
+
+	switch config.DataType {
+	case "", "list":
+		out.DataType = RedisListType
+	case "channel":
+		out.DataType = RedisChannelType
+	default:
+		return errors.New("Bad Redis data type")
+	}
+
+	logp.Info("[RedisOutput] Using Redis server %s", out.Hostname)
+	if out.Password != "" {
+		logp.Info("[RedisOutput] Using password to connect to Redis")
+	}
+	logp.Info("[RedisOutput] Redis connection timeout %s", out.Timeout)
+	logp.Info("[RedisOutput] Redis reconnect interval %s", out.ReconnectInterval)
+	logp.Info("[RedisOutput] Redis flushing interval %s", out.FlushInterval)
+	logp.Info("[RedisOutput] Using index pattern %s", out.Index)
+	logp.Info("[RedisOutput] Topology expires after %s", out.TopologyExpire)
+	logp.Info("[RedisOutput] Using db %d for storing events", out.Db)
+	logp.Info("[RedisOutput] Using db %d for storing topology", out.DbTopology)
+	logp.Info("[RedisOutput] Using %d data type", out.DataType)
+
+	out.sendingQueue = make(chan RedisQueueMsg, 1000)
+
+	out.Reconnect()
+	go out.SendMessagesGoroutine()
+
+	return nil
+}
+
+func (out *RedisOutput) RedisConnect(db int) (redis.Conn, error) {
+	conn, err := redis.DialTimeout(
+		"tcp",
+		out.Hostname,
+		out.Timeout, out.Timeout, out.Timeout)
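+	// The three durations passed above are redigo's connect, read and
+	// write timeouts; the same configured Timeout value is used for all
+	// three (see DialTimeout in redigo's conn.go).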
+	if err != nil {
+		return nil, err
+	}
+
+	if len(out.Password) > 0 {
+		_, err = conn.Do("AUTH", out.Password)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	_, err = conn.Do("PING")
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = conn.Do("SELECT", db)
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+func (out *RedisOutput) Connect() error {
+	var err error
+	out.Conn, err = out.RedisConnect(out.Db)
+	if err != nil {
+		return err
+	}
+	out.connected = true
+
+	return nil
+}
+
+func (out *RedisOutput) Close() {
+	out.Conn.Close()
+}
+
+func (out *RedisOutput) SendMessagesGoroutine() {
+
+	var err error
+	var pending int
+	flushChannel := make(<-chan time.Time)
+
+	if !out.flush_immediatelly {
+		flushTicker := time.NewTicker(out.FlushInterval)
+		flushChannel = flushTicker.C
+	}
+
+	for {
+		select {
+		case queueMsg := <-out.sendingQueue:
+
+			if !out.connected {
+				logp.Debug("output_redis", "Dropping packet ...")
+				continue
+			}
+			logp.Debug("output_redis", "Send event to redis")
+			command := "RPUSH"
+			if out.DataType == RedisChannelType {
+				command = "PUBLISH"
+			}
+
+			if !out.flush_immediatelly {
+				err = out.Conn.Send(command, queueMsg.index, queueMsg.msg)
+				pending += 1
+			} else {
+				_, err = out.Conn.Do(command, queueMsg.index, queueMsg.msg)
+			}
+			if err != nil {
+				logp.Err("Fail to publish event to REDIS: %s", err)
+				out.connected = false
+				go out.Reconnect()
+			}
+		case _ = <-flushChannel:
+			if pending > 0 {
+				out.Conn.Flush()
+				_, err = out.Conn.Receive()
+				if err != nil {
+					logp.Err("Fail to publish event to REDIS: %s", err)
+					out.connected = false
+					go out.Reconnect()
+				}
+				logp.Debug("output_redis", "Flushed %d pending commands", pending)
+				pending = 0
+			}
+		}
+	}
+}
+
+func (out *RedisOutput) Reconnect() {
+
+	for {
+		err := out.Connect()
+		if err != nil {
+			logp.Warn("Error connecting to Redis (%s). 
Retrying in %s", err, out.ReconnectInterval) + time.Sleep(out.ReconnectInterval) + } else { + break + } + } +} + +func (out *RedisOutput) GetNameByIP(ip string) string { + name, exists := out.TopologyMap[ip] + if !exists { + return "" + } + return name +} + +func (out *RedisOutput) PublishIPs(name string, localAddrs []string) error { + + logp.Debug("output_redis", "[%s] Publish the IPs %s", name, localAddrs) + + // connect to db + conn, err := out.RedisConnect(out.DbTopology) + if err != nil { + return err + } + defer conn.Close() + + _, err = conn.Do("HSET", name, "ipaddrs", strings.Join(localAddrs, ",")) + if err != nil { + logp.Err("[%s] Fail to set the IP addresses: %s", name, err) + return err + } + + _, err = conn.Do("EXPIRE", name, int(out.TopologyExpire.Seconds())) + if err != nil { + logp.Err("[%s] Fail to set the expiration time: %s", name, err) + return err + } + + out.UpdateLocalTopologyMap(conn) + + return nil +} + +func (out *RedisOutput) UpdateLocalTopologyMap(conn redis.Conn) { + + TopologyMapTmp := make(map[string]string) + + hostnames, err := redis.Strings(conn.Do("KEYS", "*")) + if err != nil { + logp.Err("Fail to get the all shippers from the topology map %s", err) + return + } + for _, hostname := range hostnames { + res, err := redis.String(conn.Do("HGET", hostname, "ipaddrs")) + if err != nil { + logp.Err("[%s] Fail to get the IPs: %s", hostname, err) + } else { + ipaddrs := strings.Split(res, ",") + for _, addr := range ipaddrs { + TopologyMapTmp[addr] = hostname + } + } + } + + out.TopologyMap = TopologyMapTmp + + logp.Debug("output_redis", "Topology %s", TopologyMapTmp) +} + +func (out *RedisOutput) PublishEvent(ts time.Time, event common.MapStr) error { + + json_event, err := json.Marshal(event) + if err != nil { + logp.Err("Fail to convert the event to JSON: %s", err) + return err + } + + out.sendingQueue <- RedisQueueMsg{index: out.Index, msg: string(json_event)} + + logp.Debug("output_redis", "Publish event") + return nil +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis_test.go b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis_test.go new file mode 100644 index 00000000000..373dec3ee9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/outputs/redis/redis_test.go @@ -0,0 +1,69 @@ +package redis + +import ( + "testing" + "time" +) + +const redisAddr = ":6379" + +func TestTopologyInRedis(t *testing.T) { + if testing.Short() { + t.Skip("Skipping topology tests in short mode, because they require REDIS") + } + + var redisOutput1 = RedisOutput{ + Index: "packetbeat", + Hostname: redisAddr, + Password: "", + DbTopology: 1, + Timeout: time.Duration(5) * time.Second, + TopologyExpire: time.Duration(15) * time.Second, + } + + var redisOutput2 = RedisOutput{ + Index: "packetbeat", + Hostname: redisAddr, + Password: "", + DbTopology: 1, + Timeout: time.Duration(5) * time.Second, + TopologyExpire: time.Duration(15) * time.Second, + } + + var redisOutput3 = RedisOutput{ + Index: "packetbeat", + Hostname: redisAddr, + Password: "", + DbTopology: 1, + Timeout: time.Duration(5) * time.Second, + TopologyExpire: time.Duration(15) * time.Second, + } + + redisOutput1.PublishIPs("proxy1", []string{"10.1.0.4"}) + redisOutput2.PublishIPs("proxy2", []string{"10.1.0.9", "fe80::4e8d:79ff:fef2:de6a"}) + redisOutput3.PublishIPs("proxy3", []string{"10.1.0.10"}) + + name2 := redisOutput3.GetNameByIP("10.1.0.9") + if name2 != "proxy2" { + t.Errorf("Failed to update proxy2 in topology: name=%s", name2) + } + + 
redisOutput1.PublishIPs("proxy1", []string{"10.1.0.4"}) + redisOutput2.PublishIPs("proxy2", []string{"10.1.0.9"}) + redisOutput3.PublishIPs("proxy3", []string{"192.168.1.2"}) + + name3 := redisOutput3.GetNameByIP("192.168.1.2") + if name3 != "proxy3" { + t.Errorf("Failed to add a new IP") + } + + name3 = redisOutput3.GetNameByIP("10.1.0.10") + if name3 != "" { + t.Errorf("Failed to delete old IP of proxy3: %s", name3) + } + + name2 = redisOutput3.GetNameByIP("fe80::4e8d:79ff:fef2:de6a") + if name2 != "" { + t.Errorf("Failed to delete old IP of proxy2: %s", name2) + } +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/publisher/publish.go b/Godeps/_workspace/src/github.com/elastic/libbeat/publisher/publish.go new file mode 100644 index 00000000000..ba9dc68572e --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/publisher/publish.go @@ -0,0 +1,295 @@ +package publisher + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "time" + + "github.com/elastic/libbeat/common" + "github.com/elastic/libbeat/logp" + "github.com/elastic/libbeat/outputs" + "github.com/elastic/libbeat/outputs/elasticsearch" + "github.com/elastic/libbeat/outputs/fileout" + "github.com/elastic/libbeat/outputs/redis" + "github.com/nranchev/go-libGeoIP" +) + +type PublisherType struct { + name string + tags []string + disabled bool + Index string + Output []outputs.OutputInterface + TopologyOutput outputs.OutputInterface + IgnoreOutgoing bool + GeoLite *libgeo.GeoIP + + RefreshTopologyTimer <-chan time.Time + Queue chan common.MapStr +} + +type ShipperConfig struct { + Name string + Refresh_topology_freq int + Ignore_outgoing bool + Topology_expire int + Tags []string + Geoip common.Geoip +} + +var Publisher PublisherType + +type Topology struct { + Name string `json:"name"` + Ip string `json:"ip"` +} + +var EnabledOutputPlugins map[outputs.OutputPlugin]outputs.OutputInterface = map[outputs.OutputPlugin]outputs.OutputInterface{ + outputs.RedisOutput: new(redis.RedisOutput), + outputs.ElasticsearchOutput: new(elasticsearch.ElasticsearchOutput), + outputs.FileOutput: new(fileout.FileOutput), +} + +func PrintPublishEvent(event common.MapStr) { + json, err := json.MarshalIndent(event, "", " ") + if err != nil { + logp.Err("json.Marshal: %s", err) + } else { + logp.Debug("publish", "Publish: %s", string(json)) + } +} + +func (publisher *PublisherType) GetServerName(ip string) string { + // in case the IP is localhost, return current shipper name + islocal, err := common.IsLoopback(ip) + if err != nil { + logp.Err("Parsing IP %s fails with: %s", ip, err) + return "" + } else { + if islocal { + return publisher.name + } + } + // find the shipper with the desired IP + if publisher.TopologyOutput != nil { + return publisher.TopologyOutput.GetNameByIP(ip) + } else { + return "" + } +} + +func (publisher *PublisherType) publishFromQueue() { + for mapstr := range publisher.Queue { + err := publisher.publishEvent(mapstr) + if err != nil { + logp.Err("Publishing failed: %v", err) + } + } +} + +func (publisher *PublisherType) publishEvent(event common.MapStr) error { + + // the timestamp is mandatory + ts, ok := event["timestamp"].(common.Time) + if !ok { + return errors.New("Missing 'timestamp' field from event.") + } + + // the count is mandatory + err := event.EnsureCountField() + if err != nil { + return err + } + + // the type is mandatory + _, ok = event["type"].(string) + if !ok { + return errors.New("Missing 'type' field from event.") + } + + var src_server, dst_server string + src, ok := 
event["src"].(*common.Endpoint) + if ok { + src_server = publisher.GetServerName(src.Ip) + event["client_ip"] = src.Ip + event["client_port"] = src.Port + event["client_proc"] = src.Proc + event["client_server"] = src_server + delete(event, "src") + } + dst, ok := event["dst"].(*common.Endpoint) + if ok { + dst_server = publisher.GetServerName(dst.Ip) + event["ip"] = dst.Ip + event["port"] = dst.Port + event["proc"] = dst.Proc + event["server"] = dst_server + delete(event, "dst") + } + + if publisher.IgnoreOutgoing && dst_server != "" && + dst_server != publisher.name { + // duplicated transaction -> ignore it + logp.Debug("publish", "Ignore duplicated transaction on %s: %s -> %s", publisher.name, src_server, dst_server) + return nil + } + + event["shipper"] = publisher.name + if len(publisher.tags) > 0 { + event["tags"] = publisher.tags + } + + if publisher.GeoLite != nil { + real_ip, exists := event["real_ip"] + if exists && len(real_ip.(string)) > 0 { + loc := publisher.GeoLite.GetLocationByIP(real_ip.(string)) + if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 { + event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude) + } + } else { + if len(src_server) == 0 && src != nil { // only for external IP addresses + loc := publisher.GeoLite.GetLocationByIP(src.Ip) + if loc != nil && loc.Latitude != 0 && loc.Longitude != 0 { + event["client_location"] = fmt.Sprintf("%f, %f", loc.Latitude, loc.Longitude) + } + } + } + } + + if logp.IsDebug("publish") { + PrintPublishEvent(event) + } + + // add transaction + has_error := false + if !publisher.disabled { + for i := 0; i < len(publisher.Output); i++ { + err := publisher.Output[i].PublishEvent(time.Time(ts), event) + if err != nil { + logp.Err("Fail to publish event type on output %s: %v", publisher.Output[i], err) + has_error = true + } + } + } + + if has_error { + return errors.New("Fail to publish event") + } + return nil +} + +func (publisher *PublisherType) UpdateTopologyPeriodically() { + for _ = range publisher.RefreshTopologyTimer { + publisher.PublishTopology() + } +} + +func (publisher *PublisherType) PublishTopology(params ...string) error { + + var localAddrs []string = params + + if len(params) == 0 { + addrs, err := common.LocalIpAddrsAsStrings(false) + if err != nil { + logp.Err("Getting local IP addresses fails with: %s", err) + return err + } + localAddrs = addrs + } + + if publisher.TopologyOutput != nil { + logp.Debug("publish", "Add topology entry for %s: %s", publisher.name, localAddrs) + + err := publisher.TopologyOutput.PublishIPs(publisher.name, localAddrs) + if err != nil { + return err + } + } + + return nil +} + +func (publisher *PublisherType) Init(publishDisabled bool, + outputs map[string]outputs.MothershipConfig, shipper ShipperConfig) error { + var err error + publisher.IgnoreOutgoing = shipper.Ignore_outgoing + + publisher.disabled = publishDisabled + if publisher.disabled { + logp.Info("Dry run mode. 
All output types except the file-based one are disabled.")
+	}
+
+	publisher.GeoLite = common.LoadGeoIPData(shipper.Geoip)
+
+	for outputId, plugin := range EnabledOutputPlugins {
+		outputName := outputId.String()
+		output, exists := outputs[outputName]
+		if exists && output.Enabled && !publisher.disabled {
+			err := plugin.Init(output, shipper.Topology_expire)
+			if err != nil {
+				logp.Err("Fail to initialize %s plugin as output: %s", outputName, err)
+				return err
+			}
+			publisher.Output = append(publisher.Output, plugin)
+
+			if output.Save_topology {
+				if publisher.TopologyOutput != nil {
+					logp.Err("Multiple outputs defined to store topology. Please add save_topology = true option only for one output.")
+					return errors.New("Multiple outputs defined to store topology")
+				}
+				publisher.TopologyOutput = plugin
+				logp.Info("Using %s to store the topology", outputName)
+			}
+		}
+	}
+
+	if !publisher.disabled {
+		if len(publisher.Output) == 0 {
+			logp.Info("No outputs are defined. Please define one under the shipper->output section.")
+			return errors.New("No outputs are defined. Please define one under the shipper->output section.")
+		}
+
+		if publisher.TopologyOutput == nil {
+			logp.Warn("No output is defined to store the topology. The server fields might not be filled.")
+		}
+	}
+
+	publisher.name = shipper.Name
+	if len(publisher.name) == 0 {
+		// use the hostname
+		publisher.name, err = os.Hostname()
+		if err != nil {
+			return err
+		}
+
+		logp.Info("No shipper name configured, using hostname '%s'", publisher.name)
+	}
+
+	publisher.tags = shipper.Tags
+
+	if !publisher.disabled && publisher.TopologyOutput != nil {
+		RefreshTopologyFreq := 10 * time.Second
+		if shipper.Refresh_topology_freq != 0 {
+			RefreshTopologyFreq = time.Duration(shipper.Refresh_topology_freq) * time.Second
+		}
+		publisher.RefreshTopologyTimer = time.Tick(RefreshTopologyFreq)
+		logp.Info("Topology map refreshed every %s", RefreshTopologyFreq)
+
+		// register shipper and its public IP addresses
+		err = publisher.PublishTopology()
+		if err != nil {
+			logp.Err("Failed to publish topology: %s", err)
+			return err
+		}
+
+		// update topology periodically
+		go publisher.UpdateTopologyPeriodically()
+	}
+
+	publisher.Queue = make(chan common.MapStr, 1000)
+	go publisher.publishFromQueue()
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/service/service.go b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service.go
new file mode 100644
index 00000000000..36e41c455c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service.go
@@ -0,0 +1,93 @@
+package service
+
+import (
+	"flag"
+	"log"
+	"os"
+	"os/signal"
+	"runtime"
+	"runtime/pprof"
+	"syscall"
+
+	"github.com/elastic/libbeat/logp"
+)
+
+// Handles OS signals that ask the service/daemon to stop.
+// The stopFunction should break the loop in the Beat so that
+// the service shuts down gracefully.
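+// Both Unix signals (SIGINT/SIGTERM via signal.Notify) and Windows service
+// control events (via ProcessWindowsControlEvents) are routed to the same
+// stopFunction.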
+func HandleSignals(stopFunction func()) { + // On ^C or SIGTERM, gracefully stop the sniffer + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigc + logp.Debug("service", "Received sigterm/sigint, stopping") + stopFunction() + }() + + // Handle the Windows service events + go ProcessWindowsControlEvents(func() { + logp.Debug("service", "Received svc stop/shutdown request") + stopFunction() + }) +} + +// cmdline flags +var memprofile, cpuprofile *string + +func CmdLineFlags(flags *flag.FlagSet) { + memprofile = flags.String("memprofile", "", "Write memory profile to this file") + cpuprofile = flags.String("cpuprofile", "", "Write cpu profile to file") +} + +func WithMemProfile() bool { + return *memprofile != "" +} + +func WithCpuProfile() bool { + return *cpuprofile != "" +} + +func BeforeRun() { + + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) + } +} + +func Cleanup() { + if *cpuprofile != "" { + pprof.StopCPUProfile() + } + + if *memprofile != "" { + runtime.GC() + + writeHeapProfile(*memprofile) + + debugMemStats() + } +} + +func debugMemStats() { + var m runtime.MemStats + runtime.ReadMemStats(&m) + logp.Debug("mem", "Memory stats: In use: %d Total (even if freed): %d System: %d", + m.Alloc, m.TotalAlloc, m.Sys) +} + +func writeHeapProfile(filename string) { + f, err := os.Create(filename) + if err != nil { + logp.Err("Failed creating file %s: %s", filename, err) + return + } + pprof.WriteHeapProfile(f) + f.Close() + + logp.Info("Created memory profile file %s.", filename) +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_unix.go b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_unix.go new file mode 100644 index 00000000000..fc78b3a444d --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package service + +// On non-windows platforms, this function does nothing. +func ProcessWindowsControlEvents(stopCallback func()) { +} diff --git a/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_windows.go b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_windows.go new file mode 100644 index 00000000000..ddd557fc36c --- /dev/null +++ b/Godeps/_workspace/src/github.com/elastic/libbeat/service/service_windows.go @@ -0,0 +1,48 @@ +package service + +import ( + "os" + "time" + + "github.com/elastic/libbeat/logp" + "golang.org/x/sys/windows/svc" +) + +type beatService struct{} + +func (m *beatService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) { + + const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown + changes <- svc.Status{State: svc.StartPending} + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} + +loop: + for c := range r { + switch c.Cmd { + case svc.Interrogate: + changes <- c.CurrentStatus + // Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4 + time.Sleep(100 * time.Millisecond) + changes <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + break loop + default: + logp.Err("Unexpected control request: $%d. Ignored.", c) + } + } + changes <- svc.Status{State: svc.StopPending} + return +} + +// On windows this creates a loop that only finishes when +// a Stop or Shutdown request is received. On non-windows +// platforms, the function does nothing. 
The stopCallback +// function is called when the Stop/Shutdown request is +// received. +func ProcessWindowsControlEvents(stopCallback func()) { + err := svc.Run(os.Args[0], &beatService{}) + if err != nil { + logp.Err("Error: %v", err) + } + stopCallback() +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 00000000000..dbc60fc8e84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package internal + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo_test.go new file mode 100644 index 00000000000..118e94b6731 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/commandinfo_test.go @@ -0,0 +1,27 @@ +package internal + +import "testing" + +func TestLookupCommandInfo(t *testing.T) { + for _, n := range []string{"watch", "WATCH", "wAtch"} { + if LookupCommandInfo(n) == (CommandInfo{}) { + t.Errorf("LookupCommandInfo(%q) = CommandInfo{}, expected non-zero value", n) + } + } +} + +func benchmarkLookupCommandInfo(b *testing.B, names ...string) { + for i := 0; i < b.N; i++ { + for _, c := range names { + LookupCommandInfo(c) + } + } +} + +func BenchmarkLookupCommandInfoCorrectCase(b *testing.B) { + benchmarkLookupCommandInfo(b, "watch", "WATCH", "monitor", "MONITOR") +} + +func BenchmarkLookupCommandInfoMixedCase(b *testing.B) { + benchmarkLookupCommandInfo(b, "wAtch", "WeTCH", "monItor", "MONiTOR") +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go new file mode 100644 index 00000000000..5f955c42448 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/internal/redistest/testdb.go @@ -0,0 +1,65 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redistest contains utilities for writing Redigo tests. +package redistest + +import ( + "errors" + "time" + + "github.com/garyburd/redigo/redis" +) + +type testConn struct { + redis.Conn +} + +func (t testConn) Close() error { + _, err := t.Conn.Do("SELECT", "9") + if err != nil { + return nil + } + _, err = t.Conn.Do("FLUSHDB") + if err != nil { + return err + } + return t.Conn.Close() +} + +// Dial dials the local Redis server and selects database 9. To prevent +// stomping on real data, DialTestDB fails if database 9 contains data. The +// returned connection flushes database 9 on close. +func Dial() (redis.Conn, error) { + c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) + if err != nil { + return nil, err + } + + _, err = c.Do("SELECT", "9") + if err != nil { + return nil, err + } + + n, err := redis.Int(c.Do("DBSIZE")) + if err != nil { + return nil, err + } + + if n != 0 { + return nil, errors.New("database #9 is not empty, test can not continue") + } + + return testConn{c}, nil +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 00000000000..e277bc75fd4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,457 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// Dial connects to the Redis server at the given network and address. +func Dial(network, address string) (Conn, error) { + dialer := xDialer{} + return dialer.Dial(network, address) +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. 
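+// A zero duration disables the corresponding deadline. A minimal usage
+// sketch, assuming a local Redis server on the default port:
+//
+//	c, err := redis.DialTimeout("tcp", ":6379", 1*time.Second, 1*time.Second, 1*time.Second)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()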
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + netDialer := net.Dialer{Timeout: connectTimeout} + dialer := xDialer{ + NetDial: netDialer.Dial, + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + } + return dialer.Dial(network, address) +} + +// A Dialer specifies options for connecting to a Redis server. +type xDialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, then net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // ReadTimeout specifies the timeout for reading a single command + // reply. If ReadTimeout is zero, then no timeout is used. + ReadTimeout time.Duration + + // WriteTimeout specifies the timeout for writing a single command. If + // WriteTimeout is zero, then no timeout is used. + WriteTimeout time.Duration +} + +// Dial connects to the Redis server at address on the named network. +func (d *xDialer) Dial(network, address string) (Conn, error) { + dial := d.NetDial + if dial == nil { + dial = net.Dial + } + netConn, err := dial(network, address) + if err != nil { + return nil, err + } + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: d.ReadTimeout, + writeTimeout: d.WriteTimeout, + }, nil +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
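+	// Only the first error is kept; closing the net.Conn makes later
+	// reads and writes fail fast, while Err() keeps reporting the
+	// original cause.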
+ c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) + case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. 
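+// For example, the integer reply ":1000\r\n" arrives here as the bytes
+// "1000" (readReply strips the ':' type prefix and readLine strips the
+// trailing CRLF), yielding int64(1000); ":-1\r\n" yields int64(-1).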
+func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. 
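+	//
+	// For example (sketch): after Send("SET", "k", "v"), Send("GET", "k")
+	// and Flush, pending is 2; two Receive calls return the two replies and
+	// bring pending back to 0.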
+ c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + if err := c.writeCommand(cmd, args); err != nil { + return nil, c.fatal(err) + } + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go new file mode 100644 index 00000000000..800370136eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/conn_test.go @@ -0,0 +1,542 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "bufio" + "bytes" + "math" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +var writeTests = []struct { + args []interface{} + expected string +}{ + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", "value"}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + }, + { + []interface{}{"SET", "key", byte(100)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", 100}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", + }, + { + []interface{}{"SET", "key", int64(math.MinInt64)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n", + }, + { + []interface{}{"SET", "key", float64(1349673917.939762)}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n", + }, + { + []interface{}{"SET", "key", ""}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"SET", "key", nil}, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", + }, + { + []interface{}{"ECHO", true, false}, + "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n", + }, +} + +func TestWrite(t *testing.T) { + for _, tt := range writeTests { + var buf bytes.Buffer + rw := bufio.ReadWriter{Writer: bufio.NewWriter(&buf)} + c := redis.NewConnBufio(rw) + err := c.Send(tt.args[0].(string), tt.args[1:]...) 
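+		// Each expected value below is the RESP encoding of the command:
+		// for SET key value, "*3\r\n" announces an array of three elements,
+		// followed by the length-prefixed bulk strings "$3\r\nSET\r\n",
+		// "$3\r\nkey\r\n" and "$5\r\nvalue\r\n".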
+ if err != nil { + t.Errorf("Send(%v) returned error %v", tt.args, err) + continue + } + rw.Flush() + actual := buf.String() + if actual != tt.expected { + t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected) + } + } +} + +var errorSentinel = &struct{}{} + +var readTests = []struct { + reply string + expected interface{} +}{ + { + "+OK\r\n", + "OK", + }, + { + "+PONG\r\n", + "PONG", + }, + { + "@OK\r\n", + errorSentinel, + }, + { + "$6\r\nfoobar\r\n", + []byte("foobar"), + }, + { + "$-1\r\n", + nil, + }, + { + ":1\r\n", + int64(1), + }, + { + ":-2\r\n", + int64(-2), + }, + { + "*0\r\n", + []interface{}{}, + }, + { + "*-1\r\n", + nil, + }, + { + "*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n", + []interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")}, + }, + { + "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n", + []interface{}{[]byte("foo"), nil, []byte("bar")}, + }, + + { + // "x" is not a valid length + "$x\r\nfoobar\r\n", + errorSentinel, + }, + { + // -2 is not a valid length + "$-2\r\n", + errorSentinel, + }, + { + // "x" is not a valid integer + ":x\r\n", + errorSentinel, + }, + { + // missing \r\n following value + "$6\r\nfoobar", + errorSentinel, + }, + { + // short value + "$6\r\nxx", + errorSentinel, + }, + { + // long value + "$6\r\nfoobarx\r\n", + errorSentinel, + }, +} + +func TestRead(t *testing.T) { + for _, tt := range readTests { + rw := bufio.ReadWriter{ + Reader: bufio.NewReader(strings.NewReader(tt.reply)), + Writer: bufio.NewWriter(nil), // writer need to support Flush + } + c := redis.NewConnBufio(rw) + actual, err := c.Receive() + if tt.expected == errorSentinel { + if err == nil { + t.Errorf("Receive(%q) did not return expected error", tt.reply) + } + } else { + if err != nil { + t.Errorf("Receive(%q) returned error %v", tt.reply, err) + continue + } + if !reflect.DeepEqual(actual, tt.expected) { + t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected) + } + } + } +} + +var testCommands = []struct { + args []interface{} + expected interface{} +}{ + { + []interface{}{"PING"}, + "PONG", + }, + { + []interface{}{"SET", "foo", "bar"}, + "OK", + }, + { + []interface{}{"GET", "foo"}, + []byte("bar"), + }, + { + []interface{}{"GET", "nokey"}, + nil, + }, + { + []interface{}{"MGET", "nokey", "foo"}, + []interface{}{nil, []byte("bar")}, + }, + { + []interface{}{"INCR", "mycounter"}, + int64(1), + }, + { + []interface{}{"LPUSH", "mylist", "foo"}, + int64(1), + }, + { + []interface{}{"LPUSH", "mylist", "bar"}, + int64(2), + }, + { + []interface{}{"LRANGE", "mylist", 0, -1}, + []interface{}{[]byte("bar"), []byte("foo")}, + }, + { + []interface{}{"MULTI"}, + "OK", + }, + { + []interface{}{"LRANGE", "mylist", 0, -1}, + "QUEUED", + }, + { + []interface{}{"PING"}, + "QUEUED", + }, + { + []interface{}{"EXEC"}, + []interface{}{ + []interface{}{[]byte("bar"), []byte("foo")}, + "PONG", + }, + }, +} + +func TestDoCommands(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...) 
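+		// The MULTI/EXEC entries at the end of the table exercise a
+		// transaction: queued commands reply "QUEUED", and EXEC returns the
+		// replies of the queued commands as a nested array.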
+ if err != nil { + t.Errorf("Do(%v) returned error %v", cmd.args, err) + continue + } + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestPipelineCommands(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { + t.Fatalf("Send(%v) returned error %v", cmd.args, err) + } + } + if err := c.Flush(); err != nil { + t.Errorf("Flush() returned error %v", err) + } + for _, cmd := range testCommands { + actual, err := c.Receive() + if err != nil { + t.Fatalf("Receive(%v) returned error %v", cmd.args, err) + } + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestBlankCommmand(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + for _, cmd := range testCommands { + if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { + t.Fatalf("Send(%v) returned error %v", cmd.args, err) + } + } + reply, err := redis.Values(c.Do("")) + if err != nil { + t.Fatalf("Do() returned error %v", err) + } + if len(reply) != len(testCommands) { + t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands)) + } + for i, cmd := range testCommands { + actual := reply[i] + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} + +func TestRecvBeforeSend(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + done := make(chan struct{}) + go func() { + c.Receive() + close(done) + }() + time.Sleep(time.Millisecond) + c.Send("PING") + c.Flush() + <-done + _, err = c.Do("") + if err != nil { + t.Fatalf("error=%v", err) + } +} + +func TestError(t *testing.T) { + c, err := redistest.Dial() + if err != nil { + t.Fatalf("error connection to database, %v", err) + } + defer c.Close() + + c.Do("SET", "key", "val") + _, err = c.Do("HSET", "key", "fld", "val") + if err == nil { + t.Errorf("Expected err for HSET on string key.") + } + if c.Err() != nil { + t.Errorf("Conn has Err()=%v, expect nil", c.Err()) + } + _, err = c.Do("SET", "key", "val") + if err != nil { + t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err) + } +} + +func TestReadDeadline(t *testing.T) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen returned %v", err) + } + defer l.Close() + + go func() { + for { + c, err := l.Accept() + if err != nil { + return + } + go func() { + time.Sleep(time.Second) + c.Write([]byte("+OK\r\n")) + c.Close() + }() + } + }() + + c1, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) + if err != nil { + t.Fatalf("redis.Dial returned %v", err) + } + defer c1.Close() + + _, err = c1.Do("PING") + if err == nil { + t.Fatalf("c1.Do() returned nil, expect error") + } + if c1.Err() == nil { + t.Fatalf("c1.Err() = nil, expect error") + } + + c2, err := redis.DialTimeout(l.Addr().Network(), l.Addr().String(), 0, time.Millisecond, 0) + if err != nil { + t.Fatalf("redis.Dial returned %v", err) + } + defer c2.Close() + + c2.Send("PING") + c2.Flush() + _, err = c2.Receive() + if err == nil { + t.Fatalf("c2.Receive() returned nil, expect error") 
+	}
+	if c2.Err() == nil {
+		t.Fatalf("c2.Err() = nil, expect error")
+	}
+}
+
+// Connect to local instance of Redis running on the default port.
+func ExampleDial(x int) {
+	c, err := redis.Dial("tcp", ":6379")
+	if err != nil {
+		// handle error
+	}
+	defer c.Close()
+}
+
+// TestExecError tests handling of errors in a transaction. See
+// http://redis.io/topics/transactions for information on how Redis handles
+// errors in a transaction.
+func TestExecError(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	// Execute commands that fail before EXEC is called.
+
+	c.Do("ZADD", "k0", 0, 0)
+	c.Send("MULTI")
+	c.Send("NOTACOMMAND", "k0", 0, 0)
+	c.Send("ZINCRBY", "k0", 0, 0)
+	v, err := c.Do("EXEC")
+	if err == nil {
+		t.Fatalf("EXEC returned values %v, expected error", v)
+	}
+
+	// Execute commands that fail after EXEC is called. The first command
+	// returns an error.
+
+	c.Do("ZADD", "k1", 0, 0)
+	c.Send("MULTI")
+	c.Send("HSET", "k1", 0, 0)
+	c.Send("ZINCRBY", "k1", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err := redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].(error); !ok {
+		t.Fatalf("first result is type %T, expected error", vs[0])
+	}
+
+	if _, ok := vs[1].([]byte); !ok {
+		t.Fatalf("second result is type %T, expected []byte", vs[1])
+	}
+
+	// Execute commands that fail after EXEC is called. The second command
+	// returns an error.
+
+	c.Do("ZADD", "k2", 0, 0)
+	c.Send("MULTI")
+	c.Send("ZINCRBY", "k2", 0, 0)
+	c.Send("HSET", "k2", 0, 0)
+	v, err = c.Do("EXEC")
+	if err != nil {
+		t.Fatalf("EXEC returned error %v", err)
+	}
+
+	vs, err = redis.Values(v, nil)
+	if err != nil {
+		t.Fatalf("Values(v) returned error %v", err)
+	}
+
+	if len(vs) != 2 {
+		t.Fatalf("len(vs) == %d, want 2", len(vs))
+	}
+
+	if _, ok := vs[0].([]byte); !ok {
+		t.Fatalf("first result is type %T, expected []byte", vs[0])
+	}
+
+	if _, ok := vs[1].(error); !ok {
+		t.Fatalf("second result is type %T, expected error", vs[1])
+	}
+}
+
+func BenchmarkDoEmpty(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do(""); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkDoPing(b *testing.B) {
+	b.StopTimer()
+	c, err := redistest.Dial()
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer c.Close()
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err := c.Do("PING"); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
new file mode 100644
index 00000000000..1ae6f0cc2a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/doc.go
@@ -0,0 +1,169 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redis is a client for the Redis database. +// +// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. An example of using the Redis APPEND command is: +// +// n, err := conn.Do("APPEND", "key", "value") +// +// The Do method converts command arguments to binary strings for transmission +// to the server as follows: +// +// Go Type Conversion +// []byte Sent as is +// string Sent as is +// int, int64 strconv.FormatInt(v) +// float64 strconv.FormatFloat(v, 'g', -1, 64) +// bool true -> "1", false -> "0" +// nil "" +// all other types fmt.Print(v) +// +// Redis command reply types are represented using the following Go types: +// +// Redis type Go type +// error redis.Error +// integer int64 +// simple string string +// bulk string []byte or nil if value not present. +// array []interface{} or nil if value not present. +// +// Use type assertions or the reply helper functions to convert from +// interface{} to the specific Go type for the command result. +// +// Pipelining +// +// Connections support pipelining using the Send, Flush and Receive methods. +// +// Send(commandName string, args ...interface{}) error +// Flush() error +// Receive() (reply interface{}, err error) +// +// Send writes the command to the connection's output buffer. Flush flushes the +// connection's output buffer to the server. Receive reads a single reply from +// the server. The following example shows a simple pipeline. +// +// c.Send("SET", "foo", "bar") +// c.Send("GET", "foo") +// c.Flush() +// c.Receive() // reply from SET +// v, err = c.Receive() // reply from GET +// +// The Do method combines the functionality of the Send, Flush and Receive +// methods. The Do method starts by writing the command and flushing the output +// buffer. Next, the Do method receives all pending replies including the reply +// for the command just sent by Do. If any of the received replies is an error, +// then Do returns the error. If there are no errors, then Do returns the last +// reply. If the command argument to the Do method is "", then the Do method +// will flush the output buffer and receive pending replies without sending a +// command. +// +// Use the Send and Do methods to implement pipelined transactions. +// +// c.Send("MULTI") +// c.Send("INCR", "foo") +// c.Send("INCR", "bar") +// r, err := c.Do("EXEC") +// fmt.Println(r) // prints [1, 1] +// +// Concurrency +// +// Connections do not support concurrent calls to the write methods (Send, +// Flush) or concurrent calls to the read method (Receive). Connections do +// allow a concurrent reader and writer. 
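+//
+// For example (a sketch; c is an established connection and error handling
+// is elided), one goroutine can read replies while another writes commands:
+//
+//	go func() {
+//		for {
+//			reply, err := c.Receive()
+//			if err != nil {
+//				return
+//			}
+//			// process reply
+//		}
+//	}()
+//	c.Send("PING")
+//	c.Flush()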
+// +// Because the Do method combines the functionality of Send, Flush and Receive, +// the Do method cannot be called concurrently with the other methods. +// +// For full concurrent access to Redis, use the thread-safe Pool to get and +// release connections from within a goroutine. +// +// Publish and Subscribe +// +// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. +// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods +// send and flush a subscription management command. The receive method +// converts a pushed message to convenient types for use in a type switch. +// +// psc := redis.PubSubConn{c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. +// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +package redis diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go new file mode 100644 index 00000000000..129b86d6708 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/log.go @@ -0,0 +1,117 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." 
+ } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) + c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 00000000000..9daf2e33ff3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,389 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. 
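+//
+// A typical check (sketch; applies when MaxActive is set and Wait is false):
+//
+//	c := pool.Get()
+//	defer c.Close()
+//	if _, err := c.Do("PING"); err == redis.ErrPoolExhausted {
+//		// back off and retry, or fail fast
+//	}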
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a global variable. +// +// func newPool(server, password string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// return c, err +// }, +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// _, err := c.Do("PING") +// return err +// }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// redisPassword = flag.String("redisPassword", "", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer, *redisPassword) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// .... +// } +// +type Pool struct { + + // Dial is an application supplied function for creating and configuring a + // connection + Dial func() (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxIdle limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // mu protects fields defined below. + mu sync.Mutex + cond *sync.Cond + closed bool + active int + + // Stack of idleConn with most recently used at the front. + idle list.List +} + +type idleConn struct { + c Conn + t time.Time +} + +// NewPool creates a new pool. This function is deprecated. Applications should +// initialize the Pool fields directly as shown in example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. 
If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. + + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. 
Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go new file mode 100644 index 00000000000..1fe305f1685 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pool_test.go @@ -0,0 +1,674 @@ +// Copyright 2011 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis_test + +import ( + "errors" + "io" + "reflect" + "sync" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type poolTestConn struct { + d *poolDialer + err error + redis.Conn +} + +func (c *poolTestConn) Close() error { c.d.open -= 1; return nil } +func (c *poolTestConn) Err() error { return c.err } + +func (c *poolTestConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + if commandName == "ERR" { + c.err = args[0].(error) + commandName = "PING" + } + if commandName != "" { + c.d.commands = append(c.d.commands, commandName) + } + return c.Conn.Do(commandName, args...) +} + +func (c *poolTestConn) Send(commandName string, args ...interface{}) error { + c.d.commands = append(c.d.commands, commandName) + return c.Conn.Send(commandName, args...) +} + +type poolDialer struct { + t *testing.T + dialed int + open int + commands []string + dialErr error +} + +func (d *poolDialer) dial() (redis.Conn, error) { + d.dialed += 1 + if d.dialErr != nil { + return nil, d.dialErr + } + c, err := redistest.Dial() + if err != nil { + return nil, err + } + d.open += 1 + return &poolTestConn{d: d, Conn: c}, nil +} + +func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) { + if d.dialed != dialed { + d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed) + } + if d.open != open { + d.t.Errorf("%s: open=%d, want %d", message, d.open, open) + } + if active := p.ActiveCount(); active != open { + d.t.Errorf("%s: active=%d, want %d", message, active, open) + } +} + +func TestPoolReuse(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c1.Close() + c2.Close() + } + + d.check("before close", p, 2, 2) + p.Close() + d.check("after close", p, 2, 0) +} + +func TestPoolMaxIdle(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + for i := 0; i < 10; i++ { + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + c1.Close() + c2.Close() + c3.Close() + } + d.check("before close", p, 12, 2) + p.Close() + d.check("after close", p, 12, 0) +} + +func TestPoolError(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c := p.Get() + c.Do("ERR", io.EOF) + if c.Err() == nil { + t.Errorf("expected c.Err() != nil") + } + c.Close() + + c = p.Get() + c.Do("ERR", io.EOF) + c.Close() + + d.check(".", p, 2, 0) +} + +func TestPoolClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + } + + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + c3 := p.Get() + c3.Do("PING") + + c1.Close() + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after connection closed") + } + + c2.Close() + c2.Close() + + p.Close() + + d.check("after pool close", p, 3, 1) + + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after connection and pool closed") + } + + c3.Close() + + d.check("after conn close", p, 3, 0) + + c1 = p.Get() + if _, err := c1.Do("PING"); err == nil { + t.Errorf("expected error after pool closed") + } +} + +func TestPoolTimeout(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + IdleTimeout: 300 * time.Second, + Dial: d.dial, + } + + now := time.Now() + redis.SetNowFunc(func() time.Time { return now }) + defer 
redis.SetNowFunc(time.Now) + + c := p.Get() + c.Do("PING") + c.Close() + + d.check("1", p, 1, 1) + + now = now.Add(p.IdleTimeout) + + c = p.Get() + c.Do("PING") + c.Close() + + d.check("2", p, 2, 1) + + p.Close() +} + +func TestPoolConcurrenSendReceive(t *testing.T) { + p := &redis.Pool{ + Dial: redistest.Dial, + } + c := p.Get() + done := make(chan error, 1) + go func() { + _, err := c.Receive() + done <- err + }() + c.Send("PING") + c.Flush() + err := <-done + if err != nil { + t.Fatalf("Receive() returned error %v", err) + } + _, err = c.Do("") + if err != nil { + t.Fatalf("Do() returned error %v", err) + } + c.Close() + p.Close() +} + +func TestPoolBorrowCheck(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + Dial: d.dial, + TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") }, + } + + for i := 0; i < 10; i++ { + c := p.Get() + c.Do("PING") + c.Close() + } + d.check("1", p, 10, 1) + p.Close() +} + +func TestPoolMaxActive(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + c1 := p.Get() + c1.Do("PING") + c2 := p.Get() + c2.Do("PING") + + d.check("1", p, 2, 2) + + c3 := p.Get() + if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted { + t.Errorf("expected pool exhausted") + } + + c3.Close() + d.check("2", p, 2, 2) + c2.Close() + d.check("3", p, 2, 2) + + c3 = p.Get() + if _, err := c3.Do("PING"); err != nil { + t.Errorf("expected good channel, err=%v", err) + } + c3.Close() + + d.check("4", p, 2, 2) + p.Close() +} + +func TestPoolMonitorCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + c := p.Get() + c.Send("MONITOR") + c.Close() + + d.check("", p, 1, 0) + p.Close() +} + +func TestPoolPubSubCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + + c := p.Get() + c.Send("SUBSCRIBE", "x") + c.Close() + + want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Send("PSUBSCRIBE", "x*") + c.Close() + + want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + p.Close() +} + +func TestPoolTransactionCleanup(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 2, + MaxActive: 2, + Dial: d.dial, + } + + c := p.Get() + c.Do("WATCH", "key") + c.Do("PING") + c.Close() + + want := []string{"WATCH", "PING", "UNWATCH"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("UNWATCH") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "UNWATCH", "PING"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "PING", "DISCARD"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("DISCARD") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "DISCARD", "PING"} + if !reflect.DeepEqual(d.commands, want) { + 
t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + c = p.Get() + c.Do("WATCH", "key") + c.Do("MULTI") + c.Do("EXEC") + c.Do("PING") + c.Close() + + want = []string{"WATCH", "MULTI", "EXEC", "PING"} + if !reflect.DeepEqual(d.commands, want) { + t.Errorf("got commands %v, want %v", d.commands, want) + } + d.commands = nil + + p.Close() +} + +func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error { + errs := make(chan error, 10) + for i := 0; i < cap(errs); i++ { + go func() { + c := p.Get() + _, err := c.Do(cmd, args...) + errs <- err + c.Close() + }() + } + + // Wait for goroutines to block. + time.Sleep(time.Second / 4) + + return errs +} + +func TestWaitPool(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "PING") + d.check("before close", p, 1, 1) + c.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + if err != nil { + t.Fatal(err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + d.check("done", p, 1, 1) +} + +func TestWaitPoolClose(t *testing.T) { + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + c := p.Get() + if _, err := c.Do("PING"); err != nil { + t.Fatal(err) + } + errs := startGoroutines(p, "PING") + d.check("before close", p, 1, 1) + p.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + switch err { + case nil: + t.Fatal("blocked goroutine did not get error") + case redis.ErrPoolExhausted: + t.Fatal("blocked goroutine got pool exhausted error") + } + case <-timeout: + t.Fatal("timeout waiting for blocked goroutine") + } + } + c.Close() + d.check("done", p, 1, 0) +} + +func TestWaitPoolCommandError(t *testing.T) { + testErr := errors.New("test") + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "ERR", testErr) + d.check("before close", p, 1, 1) + c.Close() + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + if err != nil { + t.Fatal(err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + d.check("done", p, cap(errs), 0) +} + +func TestWaitPoolDialError(t *testing.T) { + testErr := errors.New("test") + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: 1, + MaxActive: 1, + Dial: d.dial, + Wait: true, + } + defer p.Close() + c := p.Get() + errs := startGoroutines(p, "ERR", testErr) + d.check("before close", p, 1, 1) + + d.dialErr = errors.New("dial") + c.Close() + + nilCount := 0 + errCount := 0 + timeout := time.After(2 * time.Second) + for i := 0; i < cap(errs); i++ { + select { + case err := <-errs: + switch err { + case nil: + nilCount++ + case d.dialErr: + errCount++ + default: + t.Fatalf("expected dial error or nil, got %v", err) + } + case <-timeout: + t.Fatalf("timeout waiting for blocked goroutine %d", i) + } + } + if nilCount != 1 { + t.Errorf("expected one nil error, got %d", nilCount) + } + if errCount != cap(errs)-1 { + t.Errorf("expected %d dial erors, got %d", cap(errs)-1, errCount) + } + d.check("done", p, cap(errs), 0) +} + +// Borrowing requires us to iterate over the idle connections, unlock the pool, +// and perform a 
blocking operation to check the connection still works. If +// TestOnBorrow fails, we must reacquire the lock and continue iteration. This +// test ensures that iteration will work correctly if multiple threads are +// iterating simultaneously. +func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) { + count := 100 + + // First we'll Create a pool where the pilfering of idle connections fails. + d := poolDialer{t: t} + p := &redis.Pool{ + MaxIdle: count, + MaxActive: count, + Dial: d.dial, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + return errors.New("No way back into the real world.") + }, + } + defer p.Close() + + // Fill the pool with idle connections. + b1 := sync.WaitGroup{} + b1.Add(count) + b2 := sync.WaitGroup{} + b2.Add(count) + for i := 0; i < count; i++ { + go func() { + c := p.Get() + if c.Err() != nil { + t.Errorf("pool get failed: %v", c.Err()) + } + b1.Done() + b1.Wait() + c.Close() + b2.Done() + }() + } + b2.Wait() + if d.dialed != count { + t.Errorf("Expected %d dials, got %d", count, d.dialed) + } + + // Spawn a bunch of goroutines to thrash the pool. + b2.Add(count) + for i := 0; i < count; i++ { + go func() { + c := p.Get() + if c.Err() != nil { + t.Errorf("pool get failed: %v", c.Err()) + } + c.Close() + b2.Done() + }() + } + b2.Wait() + if d.dialed != count*2 { + t.Errorf("Expected %d dials, got %d", count*2, d.dialed) + } +} + +func BenchmarkPoolGet(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + c.Close() + } +} + +func BenchmarkPoolGetErr(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + } +} + +func BenchmarkPoolGetPing(b *testing.B) { + b.StopTimer() + p := redis.Pool{Dial: redistest.Dial, MaxIdle: 2} + c := p.Get() + if err := c.Err(); err != nil { + b.Fatal(err) + } + c.Close() + defer p.Close() + b.StartTimer() + for i := 0; i < b.N; i++ { + c = p.Get() + if _, err := c.Do("PING"); err != nil { + b.Fatal(err) + } + c.Close() + } +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go new file mode 100644 index 00000000000..c0ecce824d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub.go @@ -0,0 +1,144 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import "errors" + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. 
+ Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage, Pong +// or error. The return value is intended to be used directly in a type switch +// as illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + reply, err := Values(c.Conn.Receive()) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var pm PMessage + if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { + return err + } + return pm + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go new file mode 100644 index 00000000000..365a5882193 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/pubsub_test.go @@ -0,0 +1,150 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +func publish(channel, value interface{}) { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + c.Do("PUBLISH", channel, value) +} + +// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine. +func ExamplePubSubConn() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + var wg sync.WaitGroup + wg.Add(2) + + psc := redis.PubSubConn{Conn: c} + + // This goroutine receives and prints pushed notifications from the server. + // The goroutine exits when the connection is unsubscribed from all + // channels or there is an error. + go func() { + defer wg.Done() + for { + switch n := psc.Receive().(type) { + case redis.Message: + fmt.Printf("Message: %s %s\n", n.Channel, n.Data) + case redis.PMessage: + fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data) + case redis.Subscription: + fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count) + if n.Count == 0 { + return + } + case error: + fmt.Printf("error: %v\n", n) + return + } + } + }() + + // This goroutine manages subscriptions for the connection. + go func() { + defer wg.Done() + + psc.Subscribe("example") + psc.PSubscribe("p*") + + // The following function calls publish a message using another + // connection to the Redis server. + publish("example", "hello") + publish("example", "world") + publish("pexample", "foo") + publish("pexample", "bar") + + // Unsubscribe from all connections. This will cause the receiving + // goroutine to exit. 
+		psc.Unsubscribe()
+		psc.PUnsubscribe()
+	}()
+
+	wg.Wait()
+
+	// Output:
+	// Subscription: subscribe example 1
+	// Subscription: psubscribe p* 2
+	// Message: example hello
+	// Message: example world
+	// PMessage: p* pexample foo
+	// PMessage: p* pexample bar
+	// Subscription: unsubscribe example 1
+	// Subscription: punsubscribe p* 0
+}
+
+func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {
+	actual := c.Receive()
+	if !reflect.DeepEqual(actual, expected) {
+		t.Errorf("%s = %v, want %v", message, actual, expected)
+	}
+}
+
+func TestPushed(t *testing.T) {
+	pc, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer pc.Close()
+
+	nc, err := net.Dial("tcp", ":6379")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nc.Close()
+	nc.SetReadDeadline(time.Now().Add(4 * time.Second))
+
+	c := redis.PubSubConn{Conn: redis.NewConn(nc, 0, 0)}
+
+	c.Subscribe("c1")
+	expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1})
+	c.Subscribe("c2")
+	expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2})
+	c.PSubscribe("p1")
+	expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3})
+	c.PSubscribe("p2")
+	expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4})
+	c.PUnsubscribe()
+	expectPushed(t, c, "PUnsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3})
+	expectPushed(t, c, "PUnsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2})
+
+	pc.Do("PUBLISH", "c1", "hello")
+	expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")})
+
+	c.Ping("hello")
+	expectPushed(t, c, `Ping("hello")`, redis.Pong{"hello"})
+
+	c.Conn.Send("PING")
+	c.Conn.Flush()
+	expectPushed(t, c, `Send("PING")`, redis.Pong{})
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
new file mode 100644
index 00000000000..c90a48ed44b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/redis.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+	// Close closes the connection.
+	Close() error
+
+	// Err returns a non-nil value if the connection is broken. The returned
+	// value is either the first non-nil value returned from the underlying
+	// network connection or a protocol parsing error. Applications should
+	// close broken connections.
+	Err() error
+
+	// Do sends a command to the server and returns the received reply.
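+	// For the connection type in this package, Do behaves like Send followed
+	// by Flush and Receive combined into a single round trip.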
+	Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+	// Send writes the command to the client's output buffer.
+	Send(commandName string, args ...interface{}) error
+
+	// Flush flushes the output buffer to the Redis server.
+	Flush() error
+
+	// Receive receives a single reply from the Redis server.
+	Receive() (reply interface{}, err error)
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
new file mode 100644
index 00000000000..5af29bf5111
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply.go
@@ -0,0 +1,364 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+//	Reply type    Result
+//	integer       int(reply), nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Int(reply interface{}, err error) (int, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		x := int(reply)
+		if int64(x) != reply {
+			return 0, strconv.ErrRange
+		}
+		return x, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 0)
+		return int(n), err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64 bit integer. If err is
+// not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+//	Reply type    Result
+//	integer       reply, nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Int64(reply interface{}, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to a 64 bit unsigned
+// integer. If err is not equal to nil, then Uint64 returns 0, err.
Otherwise, Uint64 converts the
+// reply to a uint64 as follows:
+//
+//	Reply type    Result
+//	integer       reply, nil
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		if reply < 0 {
+			return 0, errNegativeInt
+		}
+		return uint64(reply), nil
+	case []byte:
+		n, err := strconv.ParseUint(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64 bit float. If err is
+// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to a float64 as follows:
+//
+//	Reply type    Result
+//	bulk string   parsed reply, nil
+//	nil           0, ErrNil
+//	other         0, error
+func Float64(reply interface{}, err error) (float64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		n, err := strconv.ParseFloat(string(reply), 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+//	Reply type      Result
+//	bulk string     string(reply), nil
+//	simple string   reply, nil
+//	nil             "", ErrNil
+//	other           "", error
+func String(reply interface{}, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return string(reply), nil
+	case string:
+		return reply, nil
+	case nil:
+		return "", ErrNil
+	case Error:
+		return "", reply
+	}
+	return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+//	Reply type      Result
+//	bulk string     reply, nil
+//	simple string   []byte(reply), nil
+//	nil             nil, ErrNil
+//	other           nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return reply, nil
+	case string:
+		return []byte(reply), nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to a boolean as follows:
+//
+//	Reply type    Result
+//	integer       value != 0, nil
+//	bulk string   strconv.ParseBool(reply)
+//	nil           false, ErrNil
+//	other         false, error
+func Bool(reply interface{}, err error) (bool, error) {
+	if err != nil {
+		return false, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply != 0, nil
+	case []byte:
+		return strconv.ParseBool(string(reply))
+	case nil:
+		return false, ErrNil
+	case Error:
+		return false, reply
+	}
+	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is deprecated. Use Values.
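+// It is retained for backwards compatibility; new code should call Values
+// directly.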
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. +func Strings(reply interface{}, err error) ([]string, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + result := make([]string, len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + p, ok := reply[i].([]byte) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) + } + result[i] = string(p) + } + return result, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) +} + +// Ints is a helper that converts an array command reply to a []int. If +// err is not equal to nil, then Ints returns nil, err. +func Ints(reply interface{}, err error) ([]int, error) { + var ints []int + if reply == nil { + return ints, ErrNil + } + values, err := Values(reply, err) + if err != nil { + return ints, err + } + if err := ScanSlice(values, &ints); err != nil { + return ints, err + } + return ints, nil +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. The HGETALL commands return replies in this format. +// Requires an even number of values in result. 
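+//
+// For example (the key name is illustrative):
+//
+//	counts, err := IntMap(c.Do("HGETALL", "page:counters"))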
+func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go new file mode 100644 index 00000000000..92744c590b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/reply_test.go @@ -0,0 +1,166 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/garyburd/redigo/internal/redistest" + "github.com/garyburd/redigo/redis" +) + +type valueError struct { + v interface{} + err error +} + +func ve(v interface{}, err error) valueError { + return valueError{v, err} +} + +var replyTests = []struct { + name interface{} + actual valueError + expected valueError +}{ + { + "ints([v1, v2])", + ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)), + ve([]int{4, 5}, nil), + }, + { + "ints(nil)", + ve(redis.Ints(nil, nil)), + ve([]int(nil), redis.ErrNil), + }, + { + "strings([v1, v2])", + ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]string{"v1", "v2"}, nil), + }, + { + "strings(nil)", + ve(redis.Strings(nil, nil)), + ve([]string(nil), redis.ErrNil), + }, + { + "values([v1, v2])", + ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)), + ve([]interface{}{[]byte("v1"), []byte("v2")}, nil), + }, + { + "values(nil)", + ve(redis.Values(nil, nil)), + ve([]interface{}(nil), redis.ErrNil), + }, + { + "float64(1.0)", + ve(redis.Float64([]byte("1.0"), nil)), + ve(float64(1.0), nil), + }, + { + "float64(nil)", + ve(redis.Float64(nil, nil)), + ve(float64(0.0), redis.ErrNil), + }, + { + "uint64(1)", + ve(redis.Uint64(int64(1), nil)), + ve(uint64(1), nil), + }, + { + "uint64(-1)", + ve(redis.Uint64(int64(-1), nil)), + ve(uint64(0), redis.ErrNegativeInt), + }, +} + +func TestReply(t *testing.T) { + for _, rt := range replyTests { + if rt.actual.err != rt.expected.err { + t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err) + continue + } + if !reflect.DeepEqual(rt.actual.v, rt.expected.v) { + t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v) + } + } +} + +// dial wraps DialTestDB() with a more suitable function name for examples. +func dial() (redis.Conn, error) { + return redistest.Dial() +} + +func ExampleBool() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "foo", 1) + exists, _ := redis.Bool(c.Do("EXISTS", "foo")) + fmt.Printf("%#v\n", exists) + // Output: + // true +} + +func ExampleInt() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "k1", 1) + n, _ := redis.Int(c.Do("GET", "k1")) + fmt.Printf("%#v\n", n) + n, _ = redis.Int(c.Do("INCR", "k1")) + fmt.Printf("%#v\n", n) + // Output: + // 1 + // 2 +} + +func ExampleInts() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SADD", "set_with_integers", 4, 5, 6) + ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers")) + fmt.Printf("%#v\n", ints) + // Output: + // []int{4, 5, 6} +} + +func ExampleString() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Do("SET", "hello", "world") + s, err := redis.String(c.Do("GET", "hello")) + fmt.Printf("%#v\n", s) + // Output: + // "world" +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 00000000000..8c9cfa18d47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,513 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + return fmt.Errorf("redigo: Scan cannot convert from %s to %s", + reflect.TypeOf(s), d.Type()) +} + +func convertAssignBytes(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBytes(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignValues(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. 
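+	// The source value s is one of the reply types produced by the protocol
+	// reader: nil, []byte (bulk string), int64 (integer), []interface{}
+	// (array) or Error; anything else falls through to the error default.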
+	switch s := s.(type) {
+	case nil:
+		// ignore
+	case []byte:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		case *int:
+			*d, err = strconv.Atoi(string(s))
+		case *bool:
+			*d, err = strconv.ParseBool(string(s))
+		case *[]byte:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignBytes(d.Elem(), s)
+			}
+		}
+	case int64:
+		switch d := d.(type) {
+		case *int:
+			x := int(s)
+			if int64(x) != s {
+				err = strconv.ErrRange
+				x = 0
+			}
+			*d = x
+		case *bool:
+			*d = s != 0
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignInt(d.Elem(), s)
+			}
+		}
+	case []interface{}:
+		switch d := d.(type) {
+		case *[]interface{}:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignValues(d.Elem(), s)
+			}
+		}
+	case Error:
+		err = s
+	default:
+		err = cannotConvert(reflect.ValueOf(d), s)
+	}
+	return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// Each value pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or a slice of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+	if len(src) < len(dest) {
+		return nil, errors.New("redigo: Scan array short")
+	}
+	var err error
+	for i, d := range dest {
+		err = convertAssign(d, src[i])
+		if err != nil {
+			break
+		}
+	}
+	return src[len(dest):], err
+}
+
+type fieldSpec struct {
+	name  string
+	index []int
+	//omitEmpty bool
+}
+
+type structSpec struct {
+	m map[string]*fieldSpec
+	l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+	return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		switch {
+		case f.PkgPath != "":
+			// Ignore unexported fields.
+		case f.Anonymous:
+			// TODO: Handle pointers. Requires change to decoder and
+			// protection against infinite recursion.
+			if f.Type.Kind() == reflect.Struct {
+				compileStructSpec(f.Type, depth, append(index, i), ss)
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+			p := strings.Split(tag, ",")
+			if len(p) > 0 {
+				if p[0] == "-" {
+					continue
+				}
+				if len(p[0]) > 0 {
+					fs.name = p[0]
+				}
+				for _, s := range p[1:] {
+					switch s {
+					//case "omitempty":
+					//	fs.omitempty = true
+					default:
+						panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
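+				// (This mirrors Go's rules for promoted fields: names that
+				// collide at the same embedding depth are ambiguous, so they
+				// are dropped rather than matched.)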
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j += 1
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	structSpecMutex  sync.RWMutex
+	structSpecCache  = make(map[reflect.Type]*structSpec)
+	defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// the 'redis' field tag to override the name:
+//
+//	Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo: ScanStruct expects even number of values in src")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return errors.New("redigo: ScanStruct key not a bulk string value")
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var (
+	errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements in the dest
+// slice must be integer, float, boolean, string, struct or pointer to struct
+// values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
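+//
+// For example (struct shape and source values are illustrative):
+//
+//	var albums []struct {
+//		Title  string
+//		Rating int
+//	}
+//	err := ScanSlice(values, &albums)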
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return err + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return errors.New("redigo: ScanSlice bad field name " + name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo: ScanSlice no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo: ScanSlice length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return err + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. 
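+//
+// For example (key and field names are illustrative):
+//
+//	Args{}.Add("key").AddFlat(map[string]string{"f": "v"})
+//
+// evaluates to Args{"key", "f", "v"}.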
+func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go new file mode 100644 index 00000000000..b57dd89695e --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/scan_test.go @@ -0,0 +1,412 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" + "math" + "reflect" + "testing" +) + +var scanConversionTests = []struct { + src interface{} + dest interface{} +}{ + {[]byte("-inf"), math.Inf(-1)}, + {[]byte("+inf"), math.Inf(1)}, + {[]byte("0"), float64(0)}, + {[]byte("3.14159"), float64(3.14159)}, + {[]byte("3.14"), float32(3.14)}, + {[]byte("-100"), int(-100)}, + {[]byte("101"), int(101)}, + {int64(102), int(102)}, + {[]byte("103"), uint(103)}, + {int64(104), uint(104)}, + {[]byte("105"), int8(105)}, + {int64(106), int8(106)}, + {[]byte("107"), uint8(107)}, + {int64(108), uint8(108)}, + {[]byte("0"), false}, + {int64(0), false}, + {[]byte("f"), false}, + {[]byte("1"), true}, + {int64(1), true}, + {[]byte("t"), true}, + {[]byte("hello"), "hello"}, + {[]byte("world"), []byte("world")}, + {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, + {[]interface{}{[]byte("foo")}, []string{"foo"}}, + {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}}, + {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}}, + {[]interface{}{[]byte("1")}, []int{1}}, + {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}}, + {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}}, + {[]interface{}{[]byte("1")}, []byte{1}}, + {[]interface{}{[]byte("1")}, []bool{true}}, +} + +func TestScanConversion(t *testing.T) { + for _, tt := range scanConversionTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err != nil { + t.Errorf("Scan(%v) returned error %v", tt, err) + continue + } + if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) { + t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest) + } + } +} + +var 
scanConversionErrorTests = []struct { + src interface{} + dest interface{} +}{ + {[]byte("1234"), byte(0)}, + {int64(1234), byte(0)}, + {[]byte("-1"), byte(0)}, + {int64(-1), byte(0)}, + {[]byte("junk"), false}, + {redis.Error("blah"), false}, +} + +func TestScanConversionError(t *testing.T) { + for _, tt := range scanConversionErrorTests { + values := []interface{}{tt.src} + dest := reflect.New(reflect.TypeOf(tt.dest)) + values, err := redis.Scan(values, dest.Interface()) + if err == nil { + t.Errorf("Scan(%v) did not return error", tt) + } + } +} + +func ExampleScan() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat") + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + for len(values) > 0 { + var title string + rating := -1 // initialize to illegal value to detect nil. + values, err = redis.Scan(values, &title, &rating) + if err != nil { + panic(err) + } + if rating == -1 { + fmt.Println(title, "not-rated") + } else { + fmt.Println(title, rating) + } + } + // Output: + // Beat not-rated + // Earthbound 1 + // Red 5 +} + +type s0 struct { + X int + Y int `redis:"y"` + Bt bool +} + +type s1 struct { + X int `redis:"-"` + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + B bool `redis:"b"` + Bt bool + Bf bool + s0 +} + +var scanStructTests = []struct { + title string + reply []string + value interface{} +}{ + {"basic", + []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"}, + &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}}, + }, +} + +func TestScanStruct(t *testing.T) { + for _, tt := range scanStructTests { + + var reply []interface{} + for _, v := range tt.reply { + reply = append(reply, []byte(v)) + } + + value := reflect.New(reflect.ValueOf(tt.value).Type().Elem()) + + if err := redis.ScanStruct(reply, value.Interface()); err != nil { + t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err) + } + + if !reflect.DeepEqual(value.Interface(), tt.value) { + t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value) + } + } +} + +func TestBadScanStructArgs(t *testing.T) { + x := []interface{}{"A", "b"} + test := func(v interface{}) { + if err := redis.ScanStruct(x, v); err == nil { + t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) + } + } + + test(nil) + + var v0 *struct{} + test(v0) + + var v1 int + test(&v1) + + x = x[:1] + v2 := struct{ A string }{} + test(&v2) +} + +var scanSliceTests = []struct { + src []interface{} + fieldNames []string + ok bool + dest interface{} +}{ + { + []interface{}{[]byte("1"), nil, []byte("-1")}, + nil, + true, + []int{1, 0, -1}, + }, + { + []interface{}{[]byte("1"), nil, []byte("2")}, + nil, + true, + []uint{1, 0, 2}, + }, + { + []interface{}{[]byte("-1")}, + nil, + false, + []uint{1}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + [][]byte{[]byte("hello"), nil, []byte("world")}, + }, + { + []interface{}{[]byte("hello"), nil, []byte("world")}, + nil, + true, + []string{"hello", "", "world"}, + }, + { + 
[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1")}, + nil, + false, + []struct{ A, B, C string }{{"a1", "b1", ""}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + true, + []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + []string{"A", "B"}, + true, + []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}}, + }, + { + []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, + nil, + false, + []struct{}{}, + }, +} + +func TestScanSlice(t *testing.T) { + for _, tt := range scanSliceTests { + + typ := reflect.ValueOf(tt.dest).Type() + dest := reflect.New(typ) + + err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...) + if tt.ok != (err == nil) { + t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err) + continue + } + if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) { + t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest) + } + } +} + +func ExampleScanSlice() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + c.Send("HMSET", "album:1", "title", "Red", "rating", 5) + c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) + c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) + c.Send("LPUSH", "albums", "1") + c.Send("LPUSH", "albums", "2") + c.Send("LPUSH", "albums", "3") + values, err := redis.Values(c.Do("SORT", "albums", + "BY", "album:*->rating", + "GET", "album:*->title", + "GET", "album:*->rating")) + if err != nil { + panic(err) + } + + var albums []struct { + Title string + Rating int + } + if err := redis.ScanSlice(values, &albums); err != nil { + panic(err) + } + fmt.Printf("%v\n", albums) + // Output: + // [{Earthbound 1} {Beat 4} {Red 5}] +} + +var argsTests = []struct { + title string + actual redis.Args + expected redis.Args +}{ + {"struct ptr", + redis.Args{}.AddFlat(&struct { + I int `redis:"i"` + U uint `redis:"u"` + S string `redis:"s"` + P []byte `redis:"p"` + Bt bool + Bf bool + }{ + -1234, 5678, "hello", []byte("world"), true, false, + }), + redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "Bt", true, "Bf", false}, + }, + {"struct", + redis.Args{}.AddFlat(struct{ I int }{123}), + redis.Args{"I", 123}, + }, + {"slice", + redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2), + redis.Args{1, "a", "b", "c", 2}, + }, +} + +func TestArgs(t *testing.T) { + for _, tt := range argsTests { + if !reflect.DeepEqual(tt.actual, tt.expected) { + t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected) + } + } +} + +func ExampleArgs() { + c, err := dial() + if err != nil { + panic(err) + } + defer c.Close() + + var p1, p2 struct { + Title string `redis:"title"` + Author string `redis:"author"` + Body string `redis:"body"` + } + + p1.Title = "Example" + p1.Author = "Gary" + p1.Body = "Hello" + + if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil { + panic(err) + } + + m := map[string]string{ + "title": "Example2", + "author": "Steve", + "body": "Map", + } + + if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil { + panic(err) + } + + for _, id := range []string{"id1", "id2"} { + + v, err := redis.Values(c.Do("HGETALL", id)) + if err != nil { + panic(err) + } + + 
if err := redis.ScanStruct(v, &p2); err != nil { + panic(err) + } + + fmt.Printf("%+v\n", p2) + } + + // Output: + // {Title:Example Author:Gary Body:Hello} + // {Title:Example2 Author:Steve Body:Map} +} diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 00000000000..78605a90a83 --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. 
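+// Load uses the SCRIPT LOAD command, so subsequent SendHash calls can assume
+// the script is cached by the server.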
+func (s *Script) Load(c Conn) error {
+	_, err := c.Do("SCRIPT", "LOAD", s.src)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go
new file mode 100644
index 00000000000..c9635bf08e3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/script_test.go
@@ -0,0 +1,93 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis_test
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/garyburd/redigo/internal/redistest"
+	"github.com/garyburd/redigo/redis"
+)
+
+func ExampleScript(c redis.Conn, reply interface{}, err error) {
+	// Initialize a package-level variable with a script.
+	var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`)
+
+	// In a function, use the script Do method to evaluate the script. The Do
+	// method optimistically uses the EVALSHA command. If the script is not
+	// loaded, then the Do method falls back to the EVAL command.
+	reply, err = getScript.Do(c, "foo")
+}
+
+func TestScript(t *testing.T) {
+	c, err := redistest.Dial()
+	if err != nil {
+		t.Fatalf("error connecting to database, %v", err)
+	}
+	defer c.Close()
+
+	// To test the fallback in Do, make the script unique by adding a comment
+	// with the current time.
+	script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano())
+	s := redis.NewScript(2, script)
+	reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")}
+
+	v, err := s.Do(c, "key1", "key2", "arg1", "arg2")
+	if err != nil {
+		t.Errorf("s.Do(c, ...) returned %v", err)
+	}
+
+	if !reflect.DeepEqual(v, reply) {
+		t.Errorf("s.Do(c, ...) = %v, want %v", v, reply)
+	}
+
+	err = s.Load(c)
+	if err != nil {
+		t.Errorf("s.Load(c) returned %v", err)
+	}
+
+	err = s.SendHash(c, "key1", "key2", "arg1", "arg2")
+	if err != nil {
+		t.Errorf("s.SendHash(c, ...) returned %v", err)
+	}
+
+	err = c.Flush()
+	if err != nil {
+		t.Errorf("c.Flush() returned %v", err)
+	}
+
+	v, err = c.Receive()
+	if !reflect.DeepEqual(v, reply) {
+		t.Errorf("s.SendHash(c, ...); c.Receive() = %v, want %v", v, reply)
+	}
+
+	err = s.Send(c, "key1", "key2", "arg1", "arg2")
+	if err != nil {
+		t.Errorf("s.Send(c, ...) returned %v", err)
+	}
+
+	err = c.Flush()
+	if err != nil {
+		t.Errorf("c.Flush() returned %v", err)
+	}
+
+	v, err = c.Receive()
+	if !reflect.DeepEqual(v, reply) {
+		t.Errorf("s.Send(c, ...); c.Receive() = %v, want %v", v, reply)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go
new file mode 100644
index 00000000000..b959a11f4f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/test_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bufio" + "net" + "time" +) + +func SetNowFunc(f func() time.Time) { + nowFunc = f +} + +type nopCloser struct{ net.Conn } + +func (nopCloser) Close() error { return nil } + +// NewConnBufio is a hook for tests. +func NewConnBufio(rw bufio.ReadWriter) Conn { + return &conn{br: rw.Reader, bw: rw.Writer, conn: nopCloser{}} +} + +var ( + ErrNegativeInt = errNegativeInt +) diff --git a/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go new file mode 100644 index 00000000000..1d86ee6ce8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/garyburd/redigo/redis/zpop_example_test.go @@ -0,0 +1,113 @@ +// Copyright 2013 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis_test + +import ( + "fmt" + "github.com/garyburd/redigo/redis" +) + +// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands. +func zpop(c redis.Conn, key string) (result string, err error) { + + defer func() { + // Return connection to normal state on error. + if err != nil { + c.Do("DISCARD") + } + }() + + // Loop until transaction is successful. + for { + if _, err := c.Do("WATCH", key); err != nil { + return "", err + } + + members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0)) + if err != nil { + return "", err + } + if len(members) != 1 { + return "", redis.ErrNil + } + + c.Send("MULTI") + c.Send("ZREM", key, members[0]) + queued, err := c.Do("EXEC") + if err != nil { + return "", err + } + + if queued != nil { + result = members[0] + break + } + } + + return result, nil +} + +// zpopScript pops a value from a ZSET. +var zpopScript = redis.NewScript(1, ` + local r = redis.call('ZRANGE', KEYS[1], 0, 0) + if r ~= nil then + r = r[1] + redis.call('ZREM', KEYS[1], r) + end + return r +`) + +// This example implements ZPOP as described at +// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting. +func Example_zpop() { + c, err := dial() + if err != nil { + fmt.Println(err) + return + } + defer c.Close() + + // Add test data using a pipeline. + + for i, member := range []string{"red", "blue", "green"} { + c.Send("ZADD", "zset", i, member) + } + if _, err := c.Do(""); err != nil { + fmt.Println(err) + return + } + + // Pop using WATCH/MULTI/EXEC + + v, err := zpop(c, "zset") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(v) + + // Pop using a script. 
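+	// Unlike the WATCH/MULTI/EXEC version above, the script runs atomically
+	// on the server, so no retry loop is needed.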
+
+	v, err = redis.String(zpopScript.Do(c, "zset"))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(v)
+
+	// Output:
+	// red
+	// blue
+}
diff --git a/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/LICENSE.txt b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/LICENSE.txt
new file mode 100644
index 00000000000..5a0cb2dfabb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2010, Nikola Ranchev
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/README.textile b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/README.textile
new file mode 100644
index 00000000000..2e449b8a6f5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/README.textile
@@ -0,0 +1,46 @@
+h2. General Description
+
+p{width:500px}. This is the Go implementation of the "Maxmind":http://www.maxmind.com/app/ip-location GeoIP API. It is incomplete and a work in progress; the initial goal is to support only two of the database types, the City Lite and the Country Lite. The only supported access method is loading the full database into memory on startup (memory cache).
+
+h3. Supported Access Methods
+
+* In Memory (Load(string))
+
+h3. Supported Database Formats
+
+* Country Edition (dbType=1)
+* City Edition REV 0 (dbType=6)
+* City Edition REV 1 (dbType=2)
+
+h3. Supported Lookups
+
+* By IP Address (GetLocationByIP(string))
+* By IP Number (GetLocationByIPNum(uint32))
+
+h3. Supported Responses
+
+* CountryCode string (available in all databases)
+* CountryName string (available in all databases)
+* City string
+* Region string
+* PostalCode string
+* Latitude float32
+* Longitude float32
+
+h3. To Do
+
+* Implement better error handling (report the error on load and lookups)
+* Better return values; the country edition has only code and name (perhaps use interfaces)
+* Add test cases and benchmarking
+* Add support for more database formats
+
+h3. Build
+
+make (see Makefile for more details)
+
+h3. Example
+
+./example DBFILE IPADDRESS (e.g. ./example GeoIP.dat 1.1.1.1)
+
+h3.
Usage + +Please see example.go for a complete example of how to use this library. diff --git a/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/example/example.go b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/example/example.go new file mode 100644 index 00000000000..8252b6272e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/example/example.go @@ -0,0 +1,39 @@ +package main + +import ( + "flag" + "fmt" + "github.com/nranchev/go-libGeoIP" +) + +func main() { + flag.Parse() + + // Check the number of arguments + if flag.NArg() < 2 { + fmt.Printf("usage: main DBFILE IPADDRESS\n") + return + } + + // Set the arguments + dbFile := flag.Arg(0) + ipAddr := flag.Arg(1) + + // Load the database file, exit on failure + gi, err := libgeo.Load(dbFile) + if err != nil { + fmt.Printf("Error: %s\n", err.Error()) + return + } + + // Lookup the IP and display the details if country is found + loc := gi.GetLocationByIP(ipAddr) + if loc != nil { + fmt.Printf("Country: %s (%s)\n", loc.CountryName, loc.CountryCode) + fmt.Printf("City: %s\n", loc.City) + fmt.Printf("Region: %s\n", loc.Region) + fmt.Printf("Postal Code: %s\n", loc.PostalCode) + fmt.Printf("Latitude: %f\n", loc.Latitude) + fmt.Printf("Longitude: %f\n", loc.Longitude) + } +} diff --git a/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/libgeo.go b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/libgeo.go new file mode 100644 index 00000000000..4794a55b087 --- /dev/null +++ b/Godeps/_workspace/src/github.com/nranchev/go-libGeoIP/libgeo.go @@ -0,0 +1,354 @@ +/** + * libgeo.go + * + * Copyright (c) 2010, Nikola Ranchev + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * - Neither the name of the nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +package libgeo + +// Dependencies +import ( + "errors" + "os" +) + +// Globals (const arrays that will be initialized inside init()) +var ( + countryCode = []string{ + "--", "AP", "EU", "AD", "AE", "AF", "AG", "AI", "AL", "AM", "AN", "AO", "AQ", "AR", + "AS", "AT", "AU", "AW", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ", + "BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC", "CD", "CF", + "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU", "CV", "CX", "CY", "CZ", + "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "EH", "ER", "ES", "ET", "FI", + "FJ", "FK", "FM", "FO", "FR", "FX", "GA", "GB", "GD", "GE", "GF", "GH", "GI", "GL", + "GM", "GN", "GP", "GQ", "GR", "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", + "HT", "HU", "ID", "IE", "IL", "IN", "IO", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", + "KE", "KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB", "LC", + "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD", "MG", "MH", "MK", + "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT", "MU", "MV", "MW", "MX", "MY", + "MZ", "NA", "NC", "NE", "NF", "NG", "NI", "NL", "NO", "NP", "NR", "NU", "NZ", "OM", + "PA", "PE", "PF", "PG", "PH", "PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", + "QA", "RE", "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", + "SK", "SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ", "TC", "TD", "TF", "TG", + "TH", "TJ", "TK", "TM", "TN", "TO", "TL", "TR", "TT", "TV", "TW", "TZ", "UA", "UG", + "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN", "VU", "WF", "WS", "YE", + "YT", "RS", "ZA", "ZM", "ME", "ZW", "A1", "A2", "O1", "AX", "GG", "IM", "JE", "BL", + "MF", "BQ", "SS", "O1"} + countryName = []string{ + "N/A", "Asia/Pacific Region", "Europe", "Andorra", "United Arab Emirates", + "Afghanistan", "Antigua and Barbuda", "Anguilla", "Albania", "Armenia", + "Netherlands Antilles", "Angola", "Antarctica", "Argentina", "American Samoa", + "Austria", "Australia", "Aruba", "Azerbaijan", "Bosnia and Herzegovina", + "Barbados", "Bangladesh", "Belgium", "Burkina Faso", "Bulgaria", "Bahrain", + "Burundi", "Benin", "Bermuda", "Brunei Darussalam", "Bolivia", "Brazil", "Bahamas", + "Bhutan", "Bouvet Island", "Botswana", "Belarus", "Belize", "Canada", + "Cocos (Keeling) Islands", "Congo, The Democratic Republic of the", + "Central African Republic", "Congo", "Switzerland", "Cote D'Ivoire", + "Cook Islands", "Chile", "Cameroon", "China", "Colombia", "Costa Rica", "Cuba", + "Cape Verde", "Christmas Island", "Cyprus", "Czech Republic", "Germany", + "Djibouti", "Denmark", "Dominica", "Dominican Republic", "Algeria", "Ecuador", + "Estonia", "Egypt", "Western Sahara", "Eritrea", "Spain", "Ethiopia", "Finland", + "Fiji", "Falkland Islands (Malvinas)", "Micronesia, Federated States of", + "Faroe Islands", "France", "France, Metropolitan", "Gabon", "United Kingdom", + "Grenada", "Georgia", "French Guiana", "Ghana", "Gibraltar", "Greenland", "Gambia", + "Guinea", "Guadeloupe", "Equatorial Guinea", "Greece", + "South Georgia and the South Sandwich Islands", "Guatemala", "Guam", + "Guinea-Bissau", "Guyana", "Hong Kong", "Heard Island and McDonald Islands", + "Honduras", "Croatia", "Haiti", "Hungary", "Indonesia", "Ireland", "Israel", "India", + "British Indian Ocean Territory", "Iraq", "Iran, Islamic Republic of", + "Iceland", "Italy", "Jamaica", "Jordan", "Japan", "Kenya", "Kyrgyzstan", "Cambodia", + "Kiribati", "Comoros", "Saint Kitts and Nevis", + "Korea, Democratic 
People's Republic of", "Korea, Republic of", "Kuwait", + "Cayman Islands", "Kazakhstan", "Lao People's Democratic Republic", "Lebanon", + "Saint Lucia", "Liechtenstein", "Sri Lanka", "Liberia", "Lesotho", "Lithuania", + "Luxembourg", "Latvia", "Libyan Arab Jamahiriya", "Morocco", "Monaco", + "Moldova, Republic of", "Madagascar", "Marshall Islands", + "Macedonia", "Mali", "Myanmar", "Mongolia", + "Macau", "Northern Mariana Islands", "Martinique", "Mauritania", "Montserrat", + "Malta", "Mauritius", "Maldives", "Malawi", "Mexico", "Malaysia", "Mozambique", + "Namibia", "New Caledonia", "Niger", "Norfolk Island", "Nigeria", "Nicaragua", + "Netherlands", "Norway", "Nepal", "Nauru", "Niue", "New Zealand", "Oman", "Panama", + "Peru", "French Polynesia", "Papua New Guinea", "Philippines", "Pakistan", + "Poland", "Saint Pierre and Miquelon", "Pitcairn Islands", "Puerto Rico", + "Palestinian Territory", "Portugal", "Palau", "Paraguay", "Qatar", + "Reunion", "Romania", "Russian Federation", "Rwanda", "Saudi Arabia", + "Solomon Islands", "Seychelles", "Sudan", "Sweden", "Singapore", "Saint Helena", + "Slovenia", "Svalbard and Jan Mayen", "Slovakia", "Sierra Leone", "San Marino", + "Senegal", "Somalia", "Suriname", "Sao Tome and Principe", "El Salvador", + "Syrian Arab Republic", "Swaziland", "Turks and Caicos Islands", "Chad", + "French Southern Territories", "Togo", "Thailand", "Tajikistan", "Tokelau", + "Turkmenistan", "Tunisia", "Tonga", "Timor-Leste", "Turkey", "Trinidad and Tobago", + "Tuvalu", "Taiwan", "Tanzania, United Republic of", "Ukraine", "Uganda", + "United States Minor Outlying Islands", "United States", "Uruguay", "Uzbekistan", + "Holy See (Vatican City State)", "Saint Vincent and the Grenadines", + "Venezuela", "Virgin Islands, British", "Virgin Islands, U.S.", "Vietnam", + "Vanuatu", "Wallis and Futuna", "Samoa", "Yemen", "Mayotte", "Serbia", + "South Africa", "Zambia", "Montenegro", "Zimbabwe", "Anonymous Proxy", + "Satellite Provider", "Other", "Aland Islands", "Guernsey", "Isle of Man", "Jersey", + "Saint Barthelemy", "Saint Martin", "Bonaire, Saint Eustatius and Saba", + "South Sudan", "Other"} +) + +// Constants +const ( + maxRecordLength = 4 + standardRecordLength = 3 + countryBegin = 16776960 + structureInfoMaxSize = 20 + fullRecordLength = 60 + segmentRecordLength = 3 + + // DB Types + dbCountryEdition = byte(1) + dbCityEditionRev0 = byte(6) + dbCityEditionRev1 = byte(2) +) + +// These are some structs +type GeoIP struct { + databaseSegment int // No need to make an array of size 1 + recordLength int // Set to one of the constants above + dbType byte // Store the database type + data []byte // All of the data from the DB file +} +type Location struct { + CountryCode string // If country ed. only country info is filled + CountryName string // If country ed. 
only country info is filled + Region string + City string + PostalCode string + Latitude float32 + Longitude float32 +} + +// Load the database file in memory, detect the db format and setup the GeoIP struct +func Load(filename string) (gi *GeoIP, err error) { + // Try to open the requested file + dbInfo, err := os.Lstat(filename) + if err != nil { + return + } + dbFile, err := os.Open(filename) + if err != nil { + return + } + + // Copy the db into memory + gi = new(GeoIP) + gi.data = make([]byte, dbInfo.Size()) + dbFile.Read(gi.data) + dbFile.Close() + + // Check the database type + gi.dbType = dbCountryEdition // Default the database to country edition + gi.databaseSegment = countryBegin // Default to country DB + gi.recordLength = standardRecordLength // Default to country DB + + // Search for the DB type headers + delim := make([]byte, 3) + for i := 0; i < structureInfoMaxSize; i++ { + delim = gi.data[len(gi.data)-i-3-1 : len(gi.data)-i-1] + if int8(delim[0]) == -1 && int8(delim[1]) == -1 && int8(delim[2]) == -1 { + gi.dbType = gi.data[len(gi.data)-i-1] + // If we detect city edition set the correct segment offset + if gi.dbType == dbCityEditionRev0 || gi.dbType == dbCityEditionRev1 { + buf := make([]byte, segmentRecordLength) + buf = gi.data[len(gi.data)-i-1+1 : len(gi.data)-i-1+4] + gi.databaseSegment = 0 + for j := 0; j < segmentRecordLength; j++ { + gi.databaseSegment += (int(buf[j]) << uint8(j*8)) + } + } + break + } + } + + // Support older formats + if gi.dbType >= 106 { + gi.dbType -= 105 + } + + // Reject unsupported formats + if gi.dbType != dbCountryEdition && gi.dbType != dbCityEditionRev0 && gi.dbType != dbCityEditionRev1 { + err = errors.New("Unsupported database format") + return + } + + return +} + +// Lookup by IP address and return location +func (gi *GeoIP) GetLocationByIP(ip string) *Location { + return gi.GetLocationByIPNum(AddrToNum(ip)) +} + +// Lookup by IP number and return location +func (gi *GeoIP) GetLocationByIPNum(ipNum uint32) *Location { + // Perform the lookup on the database to see if the record is found + offset := gi.lookupByIPNum(ipNum) + + // Check if the country was found + if gi.dbType == dbCountryEdition && offset-countryBegin == 0 || + gi.dbType != dbCountryEdition && gi.databaseSegment == offset { + return nil + } + + // Create a generic location structure + location := new(Location) + + // If the database is country + if gi.dbType == dbCountryEdition { + location.CountryCode = countryCode[offset-countryBegin] + location.CountryName = countryName[offset-countryBegin] + + return location + } + + // Find the max record length + recPointer := offset + (2*gi.recordLength-1)*gi.databaseSegment + recordEnd := recPointer + fullRecordLength + if len(gi.data)-recPointer < fullRecordLength { + recordEnd = len(gi.data) + } + + // Read the country code/name first + location.CountryCode = countryCode[gi.data[recPointer]] + location.CountryName = countryName[gi.data[recPointer]] + readLen := 1 + recPointer += 1 + + // Get the region + for readLen = 0; gi.data[recPointer+readLen] != '\000' && + recPointer+readLen < recordEnd; readLen++ { + } + if readLen != 0 { + location.Region = string(gi.data[recPointer : recPointer+readLen]) + } + recPointer += readLen + 1 + + // Get the city + for readLen = 0; gi.data[recPointer+readLen] != '\000' && + recPointer+readLen < recordEnd; readLen++ { + } + if readLen != 0 { + location.City = string(gi.data[recPointer : recPointer+readLen]) + } + recPointer += readLen + 1 + + // Get the postal code + for readLen = 0; 
gi.data[recPointer+readLen] != '\000' && + recPointer+readLen < recordEnd; readLen++ { + } + if readLen != 0 { + location.PostalCode = string(gi.data[recPointer : recPointer+readLen]) + } + recPointer += readLen + 1 + + // Get the latitude + coordinate := float32(0) + for j := 0; j < 3; j++ { + coordinate += float32(int32(gi.data[recPointer+j]) << uint8(j*8)) + } + location.Latitude = float32(coordinate/10000 - 180) + recPointer += 3 + + // Get the longitude + coordinate = 0 + for j := 0; j < 3; j++ { + coordinate += float32(int32(gi.data[recPointer+j]) << uint8(j*8)) + } + location.Longitude = float32(coordinate/10000 - 180) + + return location +} + +// Read the database and return record position +func (gi *GeoIP) lookupByIPNum(ip uint32) int { + buf := make([]byte, 2*maxRecordLength) + x := make([]int, 2) + offset := 0 + for depth := 31; depth >= 0; depth-- { + for i := 0; i < 2*maxRecordLength; i++ { + buf[i] = gi.data[(2*gi.recordLength*offset)+i] + } + for i := 0; i < 2; i++ { + x[i] = 0 + for j := 0; j < gi.recordLength; j++ { + var y int = int(buf[i*gi.recordLength+j]) + if y < 0 { + y += 256 + } + x[i] += (y << uint(j*8)) + } + } + if (ip & (1 << uint(depth))) > 0 { + if x[1] >= gi.databaseSegment { + return x[1] + } + offset = x[1] + } else { + if x[0] >= gi.databaseSegment { + return x[0] + } + offset = x[0] + } + } + return 0 +} + +// Convert ip address to an int representation +func AddrToNum(ip string) uint32 { + octet := uint32(0) + ipnum := uint32(0) + i := 3 + for j := 0; j < len(ip); j++ { + c := byte(ip[j]) + if c == '.' { + if octet > 255 { + return 0 + } + ipnum <<= 8 + ipnum += octet + i-- + octet = 0 + } else { + t := octet + octet <<= 3 + octet += t + octet += t + c -= '0' + if c > 9 { + return 0 + } + octet += uint32(c) + } + } + if (octet > 255) || (i != 0) { + return 0 + } + ipnum <<= 8 + return uint32(ipnum + octet) +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000000..a68e67f01b0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000000..8da58fbf6f8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000000..d6c919e6073 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md @@ -0,0 +1,128 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct{C int; D []int ",flow"} +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000000..95ec014e8cc --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000000..085cddc44be --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO Does this make sense? When is out a Ptr except when decoding a nil value? 
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 00000000000..04fdd9e72ca --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,966 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
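+	// (NaN decoding is exercised separately in TestUnmarshalNaN below.)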
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
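+	// These cases exercise field matching, pointer allocation, and numeric coercions.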
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. 
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, 
NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 
10
+  label: center/big
+
+mergeOne:
+  # Merge one map
+  << : *CENTER
+  "r": 10
+  label: center/big
+
+mergeMultiple:
+  # Merge multiple maps
+  << : [ *CENTER, *BIG ]
+  label: center/big
+
+override:
+  # Override
+  << : [ *BIG, *LEFT, *SMALL ]
+  "x": 1
+  label: center/big
+
+shortTag:
+  # Explicit short merge tag
+  !!merge "<<" : [ *CENTER, *BIG ]
+  label: center/big
+
+longTag:
+  # Explicit merge long tag
+  !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
+  label: center/big
+
+inlineMap:
+  # Inlined map
+  << : {"x": 1, "y": 2, "r": 10}
+  label: center/big
+
+inlineSequenceMap:
+  # Inlined map in sequence
+  << : [ *CENTER, {"r": 10} ]
+  label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+	var want = map[interface{}]interface{}{
+		"x":     1,
+		"y":     2,
+		"r":     10,
+		"label": "center/big",
+	}
+
+	var m map[interface{}]interface{}
+	err := yaml.Unmarshal([]byte(mergeTests), &m)
+	c.Assert(err, IsNil)
+	for name, test := range m {
+		if name == "anchors" {
+			continue
+		}
+		c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+	}
+}
+
+func (s *S) TestMergeStruct(c *C) {
+	type Data struct {
+		X, Y, R int
+		Label   string
+	}
+	want := Data{1, 2, 10, "center/big"}
+
+	var m map[string]Data
+	err := yaml.Unmarshal([]byte(mergeTests), &m)
+	c.Assert(err, IsNil)
+	for name, test := range m {
+		if name == "anchors" {
+			continue
+		}
+		c.Assert(test, Equals, want, Commentf("test %q failed", name))
+	}
+}
+
+var unmarshalNullTests = []func() interface{}{
+	func() interface{} { var v interface{}; v = "v"; return &v },
+	func() interface{} { var s = "s"; return &s },
+	func() interface{} { var s = "s"; sptr := &s; return &sptr },
+	func() interface{} { var i = 1; return &i },
+	func() interface{} { var i = 1; iptr := &i; return &iptr },
+	func() interface{} { m := map[string]int{"s": 1}; return &m },
+	func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+	for _, test := range unmarshalNullTests {
+		item := test()
+		zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+		err := yaml.Unmarshal([]byte("null"), item)
+		c.Assert(err, IsNil)
+		if reflect.TypeOf(item).Kind() == reflect.Map {
+			c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+		} else {
+			c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+		}
+	}
+}
+
+func (s *S) TestUnmarshalSliceOnPreset(c *C) {
+	// Issue #48.
+	v := struct{ A []int }{[]int{1}}
+	yaml.Unmarshal([]byte("a: [2]"), &v)
+	c.Assert(v.A, DeepEquals, []int{2})
+}
+
+//var data []byte
+//func init() {
+//	var err error
+//	data, err = ioutil.ReadFile("/tmp/file.yaml")
+//	if err != nil {
+//		panic(err)
+//	}
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+//	var err error
+//	for i := 0; i < c.N; i++ {
+//		var v map[string]interface{}
+//		err = yaml.Unmarshal(data, &v)
+//	}
+//	if err != nil {
+//		panic(err)
+//	}
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+//	var v map[string]interface{}
+//	yaml.Unmarshal(data, &v)
+//	c.ResetTimer()
+//	for i := 0; i < c.N; i++ {
+//		yaml.Marshal(&v)
+//	}
+//}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 00000000000..2befd553ed0
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// Flush the buffer if needed.
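+// Flushing triggers once fewer than five bytes remain, so the largest single
+// write (a four-byte UTF-8 sequence, or a two-byte CRLF break) always fits.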
+func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
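+// Each yaml_EMIT_* state expects a particular kind of event; an unrecognized state panics below.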
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
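+// Writes any %YAML/%TAG directives and the "---" marker unless the document start is implicit.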
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
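+		// An explicit document end is marked with "..." on its own line.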
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
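+// Simple values follow their key directly; complex ones may wrap before the ":" indicator.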
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
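+// Records the current context (root/sequence/mapping/simple key) and dispatches on the event type.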
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
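+// True when MAPPING-START is immediately followed by MAPPING-END in the event queue.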
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
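+// Shorthand tags are written as handle+suffix; otherwise the verbatim "!<...>" form is used.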
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
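+// Anchors and aliases follow the same rules: non-empty, alphanumeric characters only.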
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
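+// Pre-computes the anchor, tag, and scalar analysis that the emit steps rely on.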
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000000..84f84995517 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
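+ // A failed emit of the DOCUMENT-END or STREAM-END events is tolerated here; any other emit failure surfaces the emitter's problem via must(false) below.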
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 00000000000..84099bd3850 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
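+ // An unquoted "b: c" would not round-trip as a single plain scalar, so the emitter single-quotes it.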
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000000..0a7037ad1b2 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
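The two setters below record a problem message and position mark on the parser; through the public API these come back as the error returned from Unmarshal. A rough sketch (the exact message wording in the comment is an assumption, not a guaranteed string):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var v interface{}
        // The unterminated flow sequence forces the parser to record a
        // problem and mark via the setters below.
        err := yaml.Unmarshal([]byte("a: [1, 2\n"), &v)
        fmt.Println(err) // e.g. yaml: line 1: did not find expected ',' or ']'
    }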
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
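+ // The tag handle matched a %TAG directive; the resolved tag is the directive's prefix plus the suffix.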
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
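Empty scalars are how the parser represents keys or entries that carry no value; decoded through the public API they come out as nil. A small sketch of that observable behaviour:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var m map[string]interface{}
        // "b:" carries no value, so the parser emits an empty scalar event
        // for it (via the helper below) and it decodes as nil.
        if err := yaml.Unmarshal([]byte("a: 1\nb:\n"), &m); err != nil {
            panic(err)
        }
        fmt.Printf("%#v\n", m) // approximately map[string]interface {}{"a":1, "b":interface {}(nil)}
    }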
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000000..d5fb0972772
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
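+// Each constant below holds the raw byte sequence that may open a stream to
+// announce its encoding; yaml_parser_determine_encoding checks for these and
+// falls back to UTF-8 when none is present.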
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
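+	// The buffer's length counts decoded bytes while its capacity is the
+	// fixed allocation; re-slicing to cap opens the spare room for the
+	// decoder to write into, and the re-slice at the end of the function
+	// cuts it back to the bytes actually produced.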
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					high, low = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think. Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character. However, a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+			}
+			buffer_len += width
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 00000000000..93a86327434
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+		return true
+	}
+	return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
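+		// resolveMap is built in init() above and covers the YAML 1.1
+		// booleans ("y", "on", "True", ...), the null spellings ("",
+		// "~", "null", ...), the non-finite floats (".nan", ".inf",
+		// "-.inf", ...) and the merge key "<<".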
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000000..fe93b190c2a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever"; the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? complex key
+//        : complex value
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("complex key")
+//      VALUE
+//      SCALAR("complex value")
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Collections in a mapping:
+//
+//      ? a sequence
+//      : - item 1
+//        - item 2
+//      ? a mapping
+//      : key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
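+// The context/problem strings and their marks are recorded on the parser so
+// that the public API can render a position-annotated error; returning false
+// lets call sites write "return yaml_parser_set_scanner_error(...)" directly.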
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning. Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters. 4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank characters except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we haven't determined the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// A simple key is required only when it is the first token in the current
+	// line. Therefore it is always allowed. But we add a check anyway.
+	if required && !parser.simple_key_allowed {
+		panic("should not happen")
+	}
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
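+		// A key is "required" when it was saved in block context at the
+		// current indentation column (see yaml_parser_save_simple_key
+		// above); the only legal continuation for such a key is ':'.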
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
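+	// Unrolling to column -1 pops every entry from the indents stack,
+	// emitting a BLOCK-END token for each, so any block collections that
+	// are still open get closed before STREAM-END is produced.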
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
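+// FLOW-ENTRY corresponds to the ',' separator inside '[...]' and '{...}'
+// (see the dispatcher in yaml_parser_fetch_next_token above).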
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
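+		// For input such as "a: 1" the scalar "a" has already been
+		// scanned by the time ':' is seen, so the KEY token is inserted
+		// retroactively at the queue position recorded when the simple
+		// key was saved, rather than appended at the tail.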
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
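+// (Major and minor are each limited to max_number_length digits.)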
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
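+ // (w == 1024 is a sentinel meaning the UTF-8 width is not known yet;
+ // the leading octet below replaces it with the actual width.)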
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
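+ // Single-character escapes are expanded by the cases below; '\x', '\u'
+ // and '\U' only record code_length here and are decoded after the switch.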
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexadecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
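+ // (The first break replaces any pending whitespaces and is kept in
+ // leading_break; subsequent breaks accumulate in trailing_breaks.)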
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses the indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
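+ // If the scalar ended after consuming line breaks, the next token starts
+ // on a fresh line, where a simple key is allowed again.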
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000000..5958822f9c6 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go new file mode 100644 index 00000000000..c5cf1ed4f6e --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . 
"gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000000..190362f25df --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 00000000000..d133edf9d34 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. 
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...)
yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000000..d60a6b6b003 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the source. It should write at most len(buffer) bytes into buffer and
+// return the number of bytes written.
+//
+// On success the handler returns the number of bytes read and a nil error.
+// On failure it returns a non-nil error. At the end of the input it returns
+// io.EOF (n may still be non-zero for the final chunk).
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota // Expect STREAM-START.
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
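+
+	// Illustration only (a rough trace, not generated by the package):
+	// parsing the document "a: 1" moves through approximately
+	//   STREAM_START -> IMPLICIT_DOCUMENT_START -> BLOCK_NODE ->
+	//   BLOCK_MAPPING_FIRST_KEY -> BLOCK_MAPPING_VALUE ->
+	//   BLOCK_MAPPING_KEY -> DOCUMENT_END -> DOCUMENT_START -> END.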
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
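+
+	// Note on the two buffers above: raw_buffer collects bytes exactly as
+	// read from the source, while buffer holds the same data re-encoded as
+	// UTF-8 for the scanner; unread counts the characters in buffer that
+	// the scanner has not consumed yet.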
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. It should write all of buffer to
+// the output and return a nil error on success, or a non-nil error on
+// failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_emitter_ family of functions.
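+//
+// Data flows in the opposite direction to the parser: events are queued in
+// events, rendered into buffer, re-encoded into raw_buffer when the output
+// encoding is not UTF-8, and finally handed to write_handler.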
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
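+	// (UTF-16 input may grow when re-encoded: a 2-byte UTF-16 unit can
+	// become a 3-byte UTF-8 sequence, so three times the raw size is
+	// always sufficient.)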
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
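+// (NEL, LS and PS are multi-byte sequences in UTF-8, hence the look-ahead
+// at b[i+1] and b[i+2].)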
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} From b3d9d05d40506d9c59ec2143053a706658882c42 Mon Sep 17 00:00:00 2001 From: Tudor Golubenco Date: Thu, 13 Aug 2015 11:10:43 +0200 Subject: [PATCH 2/2] Simple Makefile and ES template. --- Makefile | 34 ++++++++++++++++++++++++++++++++++ etc/topbeat.template.json | 29 +++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 Makefile create mode 100644 etc/topbeat.template.json diff --git a/Makefile b/Makefile new file mode 100644 index 00000000000..771d0adc53a --- /dev/null +++ b/Makefile @@ -0,0 +1,34 @@ +GODEP=$(GOPATH)/bin/godep +PREFIX?=/build + +GOFILES = $(shell find . -type f -name '*.go') +topbeat: $(GOFILES) + # first make sure we have godep + go get github.com/tools/godep + $(GODEP) go build + +.PHONY: getdeps +getdeps: + go get -t -u -f + + +.PHONY: install_cfg +install_cfg: + cp etc/topbeat.yml $(PREFIX)/topbeat-linux.yml + cp etc/topbeat.template.json $(PREFIX)/topbeat.template.json + # darwin + cp etc/topbeat.yml $(PREFIX)/topbeat-darwin.yml + # win + cp etc/topbeat.yml $(PREFIX)/topbeat-win.yml + +.PHONY: cover +cover: + # gotestcover is needed to fetch coverage for multiple packages + go get github.com/pierrre/gotestcover + GOPATH=$(shell $(GODEP) path):$(GOPATH) $(GOPATH)/bin/gotestcover -coverprofile=profile.cov -covermode=count github.com/elastic/topbeat/... 
+ mkdir -p cover + $(GODEP) go tool cover -html=profile.cov -o cover/coverage.html + +.PHONY: clean +clean: + rm topbeat || true diff --git a/etc/topbeat.template.json b/etc/topbeat.template.json new file mode 100644 index 00000000000..7d8838a1595 --- /dev/null +++ b/etc/topbeat.template.json @@ -0,0 +1,29 @@ +{ + "mappings": { + "_default_": { + "_all": { + "enabled": true, + "norms": { + "enabled": false + } + }, + "dynamic_templates": [ + { + "template1": { + "mapping": { + "doc_values": true, + "ignore_above": 1024, + "index": "not_analyzed", + "type": "{dynamic_type}" + }, + "match": "*" + } + } + ] + } + }, + "settings": { + "index.refresh_interval": "5s" + }, + "template": "topbeat-*" +}
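
For reference, the template above can be installed by PUT-ing it to Elasticsearch's
_template endpoint. A minimal Go sketch (illustrative only, not part of the patch;
it assumes Elasticsearch listening on localhost:9200 and is run from the repository
root):

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // Load the index template shipped in etc/topbeat.template.json.
        body, err := ioutil.ReadFile("etc/topbeat.template.json")
        if err != nil {
            panic(err)
        }
        // Register it under the name "topbeat"; Elasticsearch applies it to
        // any index matching the template's "topbeat-*" pattern.
        req, err := http.NewRequest("PUT",
            "http://localhost:9200/_template/topbeat", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("template install status:", resp.Status)
    }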