From 6841750ea17cdcb90b0eb488791077e615a52adf Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Fri, 23 Jun 2017 16:50:03 +0300 Subject: [PATCH 1/2] Aerospike module --- NOTICE | 73 + metricbeat/docker-compose.yml | 5 + metricbeat/docs/fields.asciidoc | 302 ++ metricbeat/docs/modules/aerospike.asciidoc | 40 + .../docs/modules/aerospike/namespace.asciidoc | 19 + metricbeat/docs/modules_list.asciidoc | 2 + metricbeat/include/list.go | 2 + metricbeat/metricbeat.reference.yml | 7 + metricbeat/module/aerospike/_meta/Dockerfile | 4 + metricbeat/module/aerospike/_meta/config.yml | 5 + .../module/aerospike/_meta/docs.asciidoc | 8 + metricbeat/module/aerospike/_meta/env | 2 + metricbeat/module/aerospike/_meta/fields.yml | 11 + metricbeat/module/aerospike/aerospike.go | 35 + metricbeat/module/aerospike/aerospike_test.go | 81 + metricbeat/module/aerospike/doc.go | 4 + .../aerospike/namespace/_meta/data.json | 80 + .../aerospike/namespace/_meta/docs.asciidoc | 3 + .../aerospike/namespace/_meta/fields.yml | 157 + metricbeat/module/aerospike/namespace/data.go | 67 + .../module/aerospike/namespace/namespace.go | 93 + .../namespace/namespace_integration_test.go | 26 + metricbeat/module/aerospike/testing.go | 31 + metricbeat/tests/system/test_aerospike.py | 37 + .../aerospike-client-go/CHANGELOG.md | 913 +++++ .../aerospike/aerospike-client-go/LICENSE | 202 + .../aerospike/aerospike-client-go/README.md | 160 + .../aerospike-client-go/admin_command.go | 651 +++ .../aerospike-client-go/admin_policy.go | 32 + .../aerospike-client-go/aerospike.go | 1 + .../aerospike-client-go/batch_command.go | 295 ++ .../batch_command_exists.go | 115 + .../aerospike-client-go/batch_command_get.go | 175 + .../batch_command_get_reflect.go | 92 + .../batch_command_reflect.go | 87 + .../aerospike-client-go/batch_node.go | 117 + .../aerospike/aerospike-client-go/bin.go | 41 + .../aerospike-client-go/bytes_buffer.go | 30 + .../aerospike/aerospike-client-go/cdt_list.go | 289 ++ 
.../aerospike/aerospike-client-go/cdt_map.go | 542 +++ .../aerospike/aerospike-client-go/client.go | 1378 +++++++ .../aerospike-client-go/client_policy.go | 118 + .../aerospike-client-go/client_reflect.go | 241 ++ .../aerospike/aerospike-client-go/cluster.go | 989 +++++ .../aerospike/aerospike-client-go/command.go | 1385 +++++++ .../aerospike-client-go/commit_policy.go | 29 + .../aerospike-client-go/compat_after_go1.8.go | 25 + .../compat_before_go1.8.go | 44 + .../aerospike-client-go/connection.go | 257 ++ .../aerospike-client-go/connection_queue.go | 163 + .../aerospike-client-go/consistency_level.go | 32 + .../aerospike-client-go/delete_command.go | 82 + .../aerospike-client-go/execute_command.go | 54 + .../aerospike-client-go/execute_task.go | 104 + .../aerospike-client-go/exists_command.go | 79 + .../aerospike-client-go/field_type.go | 48 + .../aerospike/aerospike-client-go/filter.go | 194 + .../aerospike-client-go/generation_policy.go | 30 + .../aerospike/aerospike-client-go/generics.go | 3643 +++++++++++++++++ .../aerospike/aerospike-client-go/host.go | 48 + .../index_collection_type.go | 53 + .../aerospike-client-go/index_type.go | 29 + .../aerospike/aerospike-client-go/info.go | 149 + .../internal/lua/instance.go | 71 + .../aerospike-client-go/internal/lua/lua.go | 155 + .../internal/lua/lua_aerospike.go | 65 + .../internal/lua/lua_list.go | 400 ++ .../internal/lua/lua_map.go | 389 ++ .../internal/lua/lua_stream.go | 128 + .../internal/lua/resources/aerospike.go | 201 + .../internal/lua/resources/stream_ops.go | 358 ++ .../aerospike/aerospike-client-go/key.go | 159 + .../aerospike-client-go/key_helper.go | 149 + .../aerospike/aerospike-client-go/language.go | 24 + .../aerospike-client-go/large_list.go | 262 ++ .../aerospike-client-go/large_map.go | 124 + .../aerospike-client-go/large_object.go | 105 + .../aerospike-client-go/large_set.go | 97 + .../aerospike-client-go/large_stack.go | 107 + .../aerospike-client-go/logger/logger.go | 111 + 
.../aerospike/aerospike-client-go/marshal.go | 341 ++ .../aerospike-client-go/multi_policy.go | 47 + .../aerospike/aerospike-client-go/node.go | 602 +++ .../aerospike-client-go/node_error.go | 46 + .../aerospike-client-go/node_validator.go | 211 + .../aerospike-client-go/operate_command.go | 42 + .../aerospike-client-go/operation.go | 119 + .../aerospike/aerospike-client-go/packer.go | 662 +++ .../aerospike-client-go/packer_reflect.go | 75 + .../aerospike-client-go/partition.go | 65 + .../aerospike-client-go/partition_parser.go | 339 ++ .../aerospike/aerospike-client-go/peers.go | 101 + .../aerospike-client-go/peers_parser.go | 335 ++ .../aerospike-client-go/pkg/bcrypt/LICENSE | 29 + .../aerospike-client-go/pkg/bcrypt/README | 46 + .../aerospike-client-go/pkg/bcrypt/bcrypt.go | 190 + .../aerospike-client-go/pkg/bcrypt/cipher.go | 415 ++ .../pkg/ripemd160/ripemd160.go | 121 + .../pkg/ripemd160/ripemd160block.go | 161 + .../aerospike/aerospike-client-go/policy.go | 84 + .../aerospike/aerospike-client-go/predexp.go | 619 +++ .../aerospike/aerospike-client-go/priority.go | 33 + .../aerospike-client-go/privilege.go | 109 + .../query_aggregate_command.go | 163 + .../aerospike-client-go/query_command.go | 52 + .../query_objects_command.go | 40 + .../aerospike-client-go/query_policy.go | 27 + .../query_record_command.go | 31 + .../aerospike-client-go/read_command.go | 210 + .../read_command_reflect.go | 451 ++ .../read_header_command.go | 88 + .../aerospike/aerospike-client-go/record.go | 61 + .../record_exists_action.go | 45 + .../aerospike-client-go/recordset.go | 246 ++ .../aerospike-client-go/replica_policy.go | 37 + .../aerospike/aerospike-client-go/role.go | 44 + .../aerospike-client-go/scan_command.go | 71 + .../scan_objects_command.go | 73 + .../aerospike-client-go/scan_policy.go | 60 + .../aerospike-client-go/server_command.go | 90 + .../aerospike-client-go/single_command.go | 65 + .../aerospike-client-go/statement.go | 133 + 
.../aerospike/aerospike-client-go/task.go | 78 + .../aerospike-client-go/task_drop_index.go | 64 + .../aerospike-client-go/task_index.go | 82 + .../aerospike-client-go/task_register.go | 65 + .../aerospike-client-go/task_remove.go | 65 + .../aerospike-client-go/touch_command.go | 78 + .../aerospike-client-go/types/atomic/array.go | 72 + .../aerospike-client-go/types/atomic/bool.go | 65 + .../aerospike-client-go/types/atomic/int.go | 83 + .../aerospike-client-go/types/atomic/queue.go | 81 + .../types/atomic/sync_val.go | 51 + .../aerospike-client-go/types/buffer_pool.go | 75 + .../aerospike-client-go/types/epoc.go | 22 + .../aerospike-client-go/types/error.go | 50 + .../aerospike-client-go/types/message.go | 98 + .../types/particle_type/particle_type.go | 42 + .../aerospike-client-go/types/pool.go | 72 + .../types/rand/xor_shift128.go | 72 + .../aerospike-client-go/types/result_code.go | 507 +++ .../aerospike-client-go/types/types.go | 15 + .../aerospike/aerospike-client-go/udf.go | 11 + .../aerospike/aerospike-client-go/unpacker.go | 390 ++ .../aerospike-client-go/user_roles.go | 25 + .../utils/buffer/buffer.go | 136 + .../aerospike/aerospike-client-go/value.go | 1053 +++++ .../aerospike-client-go/value_helpers.go | 59 + .../aerospike-client-go/value_reflect.go | 56 + .../aerospike-client-go/write_command.go | 90 + .../aerospike-client-go/write_policy.go | 92 + vendor/github.com/yuin/gopher-lua/LICENSE | 21 + vendor/github.com/yuin/gopher-lua/Makefile | 10 + vendor/github.com/yuin/gopher-lua/README.rst | 800 ++++ vendor/github.com/yuin/gopher-lua/_state.go | 1785 ++++++++ vendor/github.com/yuin/gopher-lua/_vm.go | 1019 +++++ vendor/github.com/yuin/gopher-lua/alloc.go | 73 + vendor/github.com/yuin/gopher-lua/ast/ast.go | 29 + vendor/github.com/yuin/gopher-lua/ast/expr.go | 137 + vendor/github.com/yuin/gopher-lua/ast/misc.go | 17 + vendor/github.com/yuin/gopher-lua/ast/stmt.go | 95 + .../github.com/yuin/gopher-lua/ast/token.go | 22 + 
vendor/github.com/yuin/gopher-lua/auxlib.go | 458 +++ vendor/github.com/yuin/gopher-lua/baselib.go | 562 +++ .../github.com/yuin/gopher-lua/channellib.go | 152 + vendor/github.com/yuin/gopher-lua/compile.go | 1655 ++++++++ vendor/github.com/yuin/gopher-lua/config.go | 34 + .../yuin/gopher-lua/coroutinelib.go | 112 + vendor/github.com/yuin/gopher-lua/debuglib.go | 162 + vendor/github.com/yuin/gopher-lua/function.go | 193 + vendor/github.com/yuin/gopher-lua/iolib.go | 743 ++++ vendor/github.com/yuin/gopher-lua/linit.go | 54 + vendor/github.com/yuin/gopher-lua/loadlib.go | 125 + vendor/github.com/yuin/gopher-lua/mathlib.go | 231 ++ vendor/github.com/yuin/gopher-lua/opcode.go | 371 ++ vendor/github.com/yuin/gopher-lua/oslib.go | 206 + vendor/github.com/yuin/gopher-lua/package.go | 7 + .../github.com/yuin/gopher-lua/parse/Makefile | 4 + .../github.com/yuin/gopher-lua/parse/lexer.go | 533 +++ .../yuin/gopher-lua/parse/parser.go | 1137 +++++ .../yuin/gopher-lua/parse/parser.go.y | 524 +++ vendor/github.com/yuin/gopher-lua/pm/pm.go | 637 +++ vendor/github.com/yuin/gopher-lua/state.go | 1870 +++++++++ .../github.com/yuin/gopher-lua/stringlib.go | 442 ++ vendor/github.com/yuin/gopher-lua/table.go | 388 ++ vendor/github.com/yuin/gopher-lua/tablelib.go | 96 + vendor/github.com/yuin/gopher-lua/utils.go | 262 ++ vendor/github.com/yuin/gopher-lua/value.go | 247 ++ vendor/github.com/yuin/gopher-lua/vm.go | 1390 +++++++ vendor/vendor.json | 90 + 190 files changed, 44036 insertions(+) create mode 100644 metricbeat/docs/modules/aerospike.asciidoc create mode 100644 metricbeat/docs/modules/aerospike/namespace.asciidoc create mode 100644 metricbeat/module/aerospike/_meta/Dockerfile create mode 100644 metricbeat/module/aerospike/_meta/config.yml create mode 100644 metricbeat/module/aerospike/_meta/docs.asciidoc create mode 100644 metricbeat/module/aerospike/_meta/env create mode 100644 metricbeat/module/aerospike/_meta/fields.yml create mode 100644 
metricbeat/module/aerospike/aerospike.go create mode 100644 metricbeat/module/aerospike/aerospike_test.go create mode 100644 metricbeat/module/aerospike/doc.go create mode 100644 metricbeat/module/aerospike/namespace/_meta/data.json create mode 100644 metricbeat/module/aerospike/namespace/_meta/docs.asciidoc create mode 100644 metricbeat/module/aerospike/namespace/_meta/fields.yml create mode 100644 metricbeat/module/aerospike/namespace/data.go create mode 100644 metricbeat/module/aerospike/namespace/namespace.go create mode 100644 metricbeat/module/aerospike/namespace/namespace_integration_test.go create mode 100644 metricbeat/module/aerospike/testing.go create mode 100644 metricbeat/tests/system/test_aerospike.py create mode 100644 vendor/github.com/aerospike/aerospike-client-go/CHANGELOG.md create mode 100644 vendor/github.com/aerospike/aerospike-client-go/LICENSE create mode 100644 vendor/github.com/aerospike/aerospike-client-go/README.md create mode 100644 vendor/github.com/aerospike/aerospike-client-go/admin_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/admin_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/aerospike.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_command_exists.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_command_get.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_command_get_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_command_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/batch_node.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/bin.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/bytes_buffer.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/cdt_list.go create mode 100644 
vendor/github.com/aerospike/aerospike-client-go/cdt_map.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/client.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/client_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/client_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/cluster.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/commit_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/compat_after_go1.8.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/compat_before_go1.8.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/connection.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/connection_queue.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/consistency_level.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/delete_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/execute_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/execute_task.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/exists_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/field_type.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/filter.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/generation_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/generics.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/host.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/index_collection_type.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/index_type.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/info.go create mode 100644 
vendor/github.com/aerospike/aerospike-client-go/internal/lua/instance.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_aerospike.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_list.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_map.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_stream.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/aerospike.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/stream_ops.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/key.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/key_helper.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/language.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/large_list.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/large_map.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/large_object.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/large_set.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/large_stack.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/logger/logger.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/marshal.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/multi_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/node.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/node_error.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/node_validator.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/operate_command.go create mode 100644 
vendor/github.com/aerospike/aerospike-client-go/operation.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/packer.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/packer_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/partition.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/partition_parser.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/peers.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/peers_parser.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/LICENSE create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/README create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/bcrypt.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/cipher.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/ripemd160/ripemd160.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/pkg/ripemd160/ripemd160block.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/predexp.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/priority.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/privilege.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/query_aggregate_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/query_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/query_objects_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/query_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/query_record_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/read_command.go create mode 100644 
vendor/github.com/aerospike/aerospike-client-go/read_command_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/read_header_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/record.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/record_exists_action.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/recordset.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/replica_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/role.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/scan_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/scan_objects_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/scan_policy.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/server_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/single_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/statement.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/task.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/task_drop_index.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/task_index.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/task_register.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/task_remove.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/touch_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/atomic/array.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/atomic/bool.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/atomic/int.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/atomic/queue.go create mode 100644 
vendor/github.com/aerospike/aerospike-client-go/types/atomic/sync_val.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/buffer_pool.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/epoc.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/error.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/message.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/particle_type/particle_type.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/pool.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/rand/xor_shift128.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/result_code.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/types/types.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/udf.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/unpacker.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/user_roles.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/utils/buffer/buffer.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/value.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/value_helpers.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/value_reflect.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/write_command.go create mode 100644 vendor/github.com/aerospike/aerospike-client-go/write_policy.go create mode 100644 vendor/github.com/yuin/gopher-lua/LICENSE create mode 100644 vendor/github.com/yuin/gopher-lua/Makefile create mode 100644 vendor/github.com/yuin/gopher-lua/README.rst create mode 100644 vendor/github.com/yuin/gopher-lua/_state.go create mode 100644 vendor/github.com/yuin/gopher-lua/_vm.go create mode 100644 vendor/github.com/yuin/gopher-lua/alloc.go create mode 100644 
vendor/github.com/yuin/gopher-lua/ast/ast.go create mode 100644 vendor/github.com/yuin/gopher-lua/ast/expr.go create mode 100644 vendor/github.com/yuin/gopher-lua/ast/misc.go create mode 100644 vendor/github.com/yuin/gopher-lua/ast/stmt.go create mode 100644 vendor/github.com/yuin/gopher-lua/ast/token.go create mode 100644 vendor/github.com/yuin/gopher-lua/auxlib.go create mode 100644 vendor/github.com/yuin/gopher-lua/baselib.go create mode 100644 vendor/github.com/yuin/gopher-lua/channellib.go create mode 100644 vendor/github.com/yuin/gopher-lua/compile.go create mode 100644 vendor/github.com/yuin/gopher-lua/config.go create mode 100644 vendor/github.com/yuin/gopher-lua/coroutinelib.go create mode 100644 vendor/github.com/yuin/gopher-lua/debuglib.go create mode 100644 vendor/github.com/yuin/gopher-lua/function.go create mode 100644 vendor/github.com/yuin/gopher-lua/iolib.go create mode 100644 vendor/github.com/yuin/gopher-lua/linit.go create mode 100644 vendor/github.com/yuin/gopher-lua/loadlib.go create mode 100644 vendor/github.com/yuin/gopher-lua/mathlib.go create mode 100644 vendor/github.com/yuin/gopher-lua/opcode.go create mode 100644 vendor/github.com/yuin/gopher-lua/oslib.go create mode 100644 vendor/github.com/yuin/gopher-lua/package.go create mode 100644 vendor/github.com/yuin/gopher-lua/parse/Makefile create mode 100644 vendor/github.com/yuin/gopher-lua/parse/lexer.go create mode 100644 vendor/github.com/yuin/gopher-lua/parse/parser.go create mode 100644 vendor/github.com/yuin/gopher-lua/parse/parser.go.y create mode 100644 vendor/github.com/yuin/gopher-lua/pm/pm.go create mode 100644 vendor/github.com/yuin/gopher-lua/state.go create mode 100644 vendor/github.com/yuin/gopher-lua/stringlib.go create mode 100644 vendor/github.com/yuin/gopher-lua/table.go create mode 100644 vendor/github.com/yuin/gopher-lua/tablelib.go create mode 100644 vendor/github.com/yuin/gopher-lua/utils.go create mode 100644 vendor/github.com/yuin/gopher-lua/value.go create mode 
100644 vendor/github.com/yuin/gopher-lua/vm.go diff --git a/NOTICE b/NOTICE index b3f5ac2b830..a91510b7e4d 100644 --- a/NOTICE +++ b/NOTICE @@ -9,6 +9,51 @@ Third party libraries used by the Beats project: ========================================================================== +-------------------------------------------------------------------- +Dependency: github.com/aerospike/aerospike-client-go +Revision: 0f3b54da6bdc2c31c505f9afbc5f434dd2089658 +License type (autodetected): Apache License 2.0 +./vendor/github.com/aerospike/aerospike-client-go/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/aerospike/aerospike-client-go/pkg/bcrypt +Revision: 0f3b54da6bdc2c31c505f9afbc5f434dd2089658 +License type (autodetected): BSD 3-clause license +./vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2011 James Keane . All rights reserved. +Copyright (c) 2006 Damien Miller . +Copyright (c) 2011 ZooWar.com, All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of weekendlogic nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: github.com/andrewkroh/sys Revision: 287798fe3e430efeb9318b95ff52353aaa2b59b1 @@ -3101,6 +3146,34 @@ License type (autodetected): Apache License 2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/yuin/gopher-lua +Revision: b402f3114ec730d8bddb074a6c137309f561aa78 +License type (autodetected): MIT license +./vendor/github.com/yuin/gopher-lua/LICENSE: +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + -------------------------------------------------------------------- Dependency: golang.org/x/net Revision: e90d6d0afc4c315a0d87a568ae68577cc15149a0 diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index 8e316130d50..8e54628ceed 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -16,6 +16,7 @@ services: - proxy_dep env_file: + - ${PWD}/module/aerospike/_meta/env - ${PWD}/module/apache/_meta/env - ${PWD}/module/ceph/_meta/env - ${PWD}/module/couchbase/_meta/env @@ -43,6 +44,7 @@ services: proxy_dep: image: busybox depends_on: + aerospike: { condition: service_healthy } apache: { condition: service_healthy } ceph: { condition: service_healthy } couchbase: { condition: service_healthy } @@ -67,6 +69,9 @@ services: zookeeper: { condition: service_healthy } # Modules + aerospike: + build: ${PWD}/module/aerospike/_meta + apache: build: ${PWD}/module/apache/_meta diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 271622270b3..2903d149931 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -12,6 +12,7 @@ This file is generated! See _meta/fields.yml and scripts/generate_field_docs.py This document describes the fields that are exported by Metricbeat. 
They are grouped in the following categories: +* <> * <> * <> * <> @@ -45,6 +46,307 @@ grouped in the following categories: * <> -- +[[exported-fields-aerospike]] +== aerospike Fields + +[]experimental +aerospike Module + + + +[float] +== aerospike Fields + + + + +[float] +== namespace Fields + +namespace + + + +[float] +== client Fields + +Client stats. + + + +[float] +== delete Fields + +Client delete transactions stats. + + + +[float] +=== aerospike.namespace.client.delete.error + +type: long + +Number of client delete transactions that failed with an error. + + +[float] +=== aerospike.namespace.client.delete.not_found + +type: long + +Number of client delete transactions that resulted in a not found. + + +[float] +=== aerospike.namespace.client.delete.success + +type: long + +Number of successful client delete transactions. + + +[float] +=== aerospike.namespace.client.delete.timeout + +type: long + +Number of client delete transactions that timed out. + + +[float] +== read Fields + +Client read transactions stats. + + + +[float] +=== aerospike.namespace.client.read.error + +type: long + +Number of client read transaction errors. + + +[float] +=== aerospike.namespace.client.read.not_found + +type: long + +Number of client read transaction that resulted in not found. + + +[float] +=== aerospike.namespace.client.read.success + +type: long + +Number of successful client read transactions. + + +[float] +=== aerospike.namespace.client.read.timeout + +type: long + +Number of client read transaction that timed out. + + +[float] +== write Fields + +Client write transactions stats. + + + +[float] +=== aerospike.namespace.client.write.error + +type: long + +Number of client write transactions that failed with an error. + + +[float] +=== aerospike.namespace.client.write.success + +type: long + +Number of successful client write transactions. + + +[float] +=== aerospike.namespace.client.write.timeout + +type: long + +Number of client write transactions that timed out. 
+ + +[float] +== device Fields + +Disk storage stats + + + +[float] +=== aerospike.namespace.device.available.pct + +type: scaled_float + +format: percent + +Measures the minimum contiguous disk space across all disks in a namespace. + + +[float] +=== aerospike.namespace.device.free.pct + +type: scaled_float + +format: percent + +Percentage of disk capacity free for this namespace. + + +[float] +=== aerospike.namespace.device.total.bytes + +type: long + +format: bytes + +Total bytes of disk space allocated to this namespace on this node. + + +[float] +=== aerospike.namespace.device.used.bytes + +type: long + +format: bytes + +Total bytes of disk space used by this namespace on this node. + + +[float] +=== aerospike.namespace.hwm_breached + +type: boolean + +If true, Aerospike has breached 'high-water-[disk|memory]-pct' for this namespace. + + +[float] +== memory Fields + +Memory storage stats. + + + +[float] +=== aerospike.namespace.memory.free.pct + +type: scaled_float + +format: percent + +Percentage of memory capacity free for this namespace on this node. + + +[float] +=== aerospike.namespace.memory.used.data.bytes + +type: long + +format: bytes + +Amount of memory occupied by data for this namespace on this node. + + +[float] +=== aerospike.namespace.memory.used.index.bytes + +type: long + +format: bytes + +Amount of memory occupied by the index for this namespace on this node. + + +[float] +=== aerospike.namespace.memory.used.sindex.bytes + +type: long + +format: bytes + +Amount of memory occupied by secondary indexes for this namespace on this node. + + +[float] +=== aerospike.namespace.memory.used.total.bytes + +type: long + +format: bytes + +Total bytes of memory used by this namespace on this node. 
+ + +[float] +=== aerospike.namespace.name + +type: keyword + +Namespace name + + +[float] +=== aerospike.namespace.node.host + +type: keyword + +[float] +=== aerospike.namespace.node.name + +type: keyword + +Node name + + +[float] +== objects Fields + +Records stats. + + + +[float] +=== aerospike.namespace.objects.master + +type: long + +Number of records on this node which are active masters. + + +[float] +=== aerospike.namespace.objects.total + +type: long + +Number of records in this namespace for this node. + + +[float] +=== aerospike.namespace.stop_writes + +type: boolean + +If true this namespace is currently not allowing writes. + + [[exported-fields-apache]] == Apache Fields diff --git a/metricbeat/docs/modules/aerospike.asciidoc b/metricbeat/docs/modules/aerospike.asciidoc new file mode 100644 index 00000000000..03e404873a4 --- /dev/null +++ b/metricbeat/docs/modules/aerospike.asciidoc @@ -0,0 +1,40 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-aerospike]] +== aerospike Module + +This is the aerospike Module. It uses http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. + +[float] +=== Compatibility + +The Aeropsike metricsets were tested with Aerospike 3.9 and are expected to work with all versions >= 3.9. + + +[float] +=== Example Configuration + +The aerospike module supports the standard configuration options that are described +in <>. 
Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: aerospike + metricsets: ["namespace"] + enabled: false + period: 10s + hosts: ["localhost:3000"] +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::aerospike/namespace.asciidoc[] + diff --git a/metricbeat/docs/modules/aerospike/namespace.asciidoc b/metricbeat/docs/modules/aerospike/namespace.asciidoc new file mode 100644 index 00000000000..a38fe9fd8c0 --- /dev/null +++ b/metricbeat/docs/modules/aerospike/namespace.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-aerospike-namespace]] +include::../../../module/aerospike/namespace/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/aerospike/namespace/_meta/data.json[] +---- diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index f3c1b2906d5..2c070cd896f 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -2,6 +2,7 @@ This file is generated! See scripts/docs_collector.py //// + * <> * <> * <> * <> @@ -32,6 +33,7 @@ This file is generated! 
See scripts/docs_collector.py -- +include::modules/aerospike.asciidoc[] include::modules/apache.asciidoc[] include::modules/ceph.asciidoc[] include::modules/couchbase.asciidoc[] diff --git a/metricbeat/include/list.go b/metricbeat/include/list.go index c3243f2e7dc..b6973935c42 100644 --- a/metricbeat/include/list.go +++ b/metricbeat/include/list.go @@ -8,6 +8,8 @@ package include import ( // This list is automatically generated by `make imports` + _ "github.com/elastic/beats/metricbeat/module/aerospike" + _ "github.com/elastic/beats/metricbeat/module/aerospike/namespace" _ "github.com/elastic/beats/metricbeat/module/apache" _ "github.com/elastic/beats/metricbeat/module/apache/status" _ "github.com/elastic/beats/metricbeat/module/ceph" diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 8073659c2b5..7947352bbe3 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -83,6 +83,13 @@ metricbeat.modules: #socket.reverse_lookup.success_ttl: 60s #socket.reverse_lookup.failure_ttl: 60s +#------------------------------ aerospike Module ----------------------------- +- module: aerospike + metricsets: ["namespace"] + enabled: false + period: 10s + hosts: ["localhost:3000"] + #------------------------------- Apache Module ------------------------------- - module: apache metricsets: ["status"] diff --git a/metricbeat/module/aerospike/_meta/Dockerfile b/metricbeat/module/aerospike/_meta/Dockerfile new file mode 100644 index 00000000000..038cb31fca5 --- /dev/null +++ b/metricbeat/module/aerospike/_meta/Dockerfile @@ -0,0 +1,4 @@ +FROM aerospike:3.9.0 + +RUN apt-get update && apt-get install -y netcat +HEALTHCHECK CMD nc -z localhost 3000 diff --git a/metricbeat/module/aerospike/_meta/config.yml b/metricbeat/module/aerospike/_meta/config.yml new file mode 100644 index 00000000000..787e314dc60 --- /dev/null +++ b/metricbeat/module/aerospike/_meta/config.yml @@ -0,0 +1,5 @@ +- module: aerospike + 
metricsets: ["namespace"] + enabled: false + period: 10s + hosts: ["localhost:3000"] diff --git a/metricbeat/module/aerospike/_meta/docs.asciidoc b/metricbeat/module/aerospike/_meta/docs.asciidoc new file mode 100644 index 00000000000..369df65eaff --- /dev/null +++ b/metricbeat/module/aerospike/_meta/docs.asciidoc @@ -0,0 +1,8 @@ +== aerospike Module + +This is the aerospike Module. It uses http://www.aerospike.com/docs/reference/info[Info command] to collect metrics. + +[float] +=== Compatibility + +The Aerospike metricsets were tested with Aerospike 3.9 and are expected to work with all versions >= 3.9. diff --git a/metricbeat/module/aerospike/_meta/env b/metricbeat/module/aerospike/_meta/env new file mode 100644 index 00000000000..2c95ea957cb --- /dev/null +++ b/metricbeat/module/aerospike/_meta/env @@ -0,0 +1,2 @@ +AEROSPIKE_HOST=aerospike +AEROSPIKE_PORT=3000 diff --git a/metricbeat/module/aerospike/_meta/fields.yml b/metricbeat/module/aerospike/_meta/fields.yml new file mode 100644 index 00000000000..cb3bb8d1416 --- /dev/null +++ b/metricbeat/module/aerospike/_meta/fields.yml @@ -0,0 +1,11 @@ +- key: aerospike + title: "aerospike" + description: > + experimental[] + + aerospike Module + fields: + - name: aerospike + type: group + description: > + fields: diff --git a/metricbeat/module/aerospike/aerospike.go b/metricbeat/module/aerospike/aerospike.go new file mode 100644 index 00000000000..83173d90248 --- /dev/null +++ b/metricbeat/module/aerospike/aerospike.go @@ -0,0 +1,35 @@ +package aerospike + +import ( + "strconv" + "strings" + + "github.com/pkg/errors" + + as "github.com/aerospike/aerospike-client-go" +) + +func ParseHost(host string) (*as.Host, error) { + pieces := strings.Split(host, ":") + if len(pieces) != 2 { + return nil, errors.Errorf("Can't parse host %s", host) + } + port, err := strconv.Atoi(pieces[1]) + if err != nil { + return nil, errors.Wrapf(err, "Can't parse port") + } + return as.NewHost(pieces[0], port), nil +} + +func ParseInfo(info 
string) map[string]interface{} { + result := make(map[string]interface{}) + + for _, keyValueStr := range strings.Split(info, ";") { + KeyValArr := strings.Split(keyValueStr, "=") + if len(KeyValArr) == 2 { + result[KeyValArr[0]] = KeyValArr[1] + } + } + + return result +} diff --git a/metricbeat/module/aerospike/aerospike_test.go b/metricbeat/module/aerospike/aerospike_test.go new file mode 100644 index 00000000000..67315947183 --- /dev/null +++ b/metricbeat/module/aerospike/aerospike_test.go @@ -0,0 +1,81 @@ +package aerospike + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + as "github.com/aerospike/aerospike-client-go" +) + +func TestParseHost(t *testing.T) { + tests := []struct { + Name string + Host string + expectedHost *as.Host + expectedErr error + }{ + { + Name: "with hostname and port", + Host: "localhost:3000", + expectedHost: as.NewHost("localhost", 3000), + }, + { + Name: "without port", + Host: "localhost", + expectedErr: errors.New("Can't parse host localhost"), + }, + { + Name: "with wrong port", + Host: "localhost:wrong", + expectedErr: errors.New("Can't parse port: strconv.Atoi: parsing \"wrong\": invalid syntax"), + }, + } + + for _, test := range tests { + result, err := ParseHost(test.Host) + if err != nil { + if test.expectedErr != nil { + assert.Equal(t, test.expectedErr.Error(), err.Error()) + continue + } + t.Error(err) + continue + } + + assert.Equal(t, test.expectedHost.String(), result.String(), test.Name) + } +} + +func TestParseInfo(t *testing.T) { + tests := []struct { + Name string + info string + expected map[string]interface{} + }{ + { + Name: "with kv", + info: "key1=value1;key2=value2", + expected: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + { + Name: "without kv", + info: "wrong result", + expected: map[string]interface{}{}, + }, + { + Name: "mixed", + info: "wrong result;key=value", + expected: map[string]interface{}{"key": "value"}, + }, + } + + for _, test := 
range tests { + result := ParseInfo(test.info) + assert.Equal(t, test.expected, result, test.Name) + } +} diff --git a/metricbeat/module/aerospike/doc.go b/metricbeat/module/aerospike/doc.go new file mode 100644 index 00000000000..356b03de716 --- /dev/null +++ b/metricbeat/module/aerospike/doc.go @@ -0,0 +1,4 @@ +/* +Package aerospike is a Metricbeat module that contains MetricSets. +*/ +package aerospike diff --git a/metricbeat/module/aerospike/namespace/_meta/data.json b/metricbeat/module/aerospike/namespace/_meta/data.json new file mode 100644 index 00000000000..c17a1770f07 --- /dev/null +++ b/metricbeat/module/aerospike/namespace/_meta/data.json @@ -0,0 +1,80 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "aerospike": { + "namespace": { + "client": { + "delete": { + "error": 0, + "not_found": 0, + "success": 0, + "timeout": 0 + }, + "read": { + "error": 0, + "not_found": 0, + "success": 0, + "timeout": 0 + }, + "write": { + "error": 0, + "success": 0, + "timeout": 0 + } + }, + "device": { + "available": { + "pct": 99 + }, + "free": { + "pct": 100 + }, + "total": { + "bytes": 4294967296 + }, + "used": { + "bytes": 0 + } + }, + "hwm_breached": false, + "memory": { + "free": { + "pct": 100 + }, + "used": { + "data": { + "bytes": 0 + }, + "index": { + "bytes": 0 + }, + "sindex": { + "bytes": 0 + }, + "total": { + "bytes": 0 + } + } + }, + "name": "test", + "node": { + "host": "127.0.0.1:3000", + "name": "BB94D7CC01BE20A" + }, + "objects": { + "master": 0, + "total": 0 + }, + "stop_writes": false + } + }, + "beat": { + "hostname": "host.example.com", + "name": "host.example.com" + }, + "metricset": { + "host": "localhost:3000", + "module": "aerospike", + "name": "namespace", + "rtt": 115 + } +} diff --git a/metricbeat/module/aerospike/namespace/_meta/docs.asciidoc b/metricbeat/module/aerospike/namespace/_meta/docs.asciidoc new file mode 100644 index 00000000000..262f7e5928f --- /dev/null +++ b/metricbeat/module/aerospike/namespace/_meta/docs.asciidoc @@ -0,0 
+1,3 @@ +=== aerospike namespace MetricSet + +This is the namespace metricset of the module aerospike. diff --git a/metricbeat/module/aerospike/namespace/_meta/fields.yml b/metricbeat/module/aerospike/namespace/_meta/fields.yml new file mode 100644 index 00000000000..fb6c267aeb0 --- /dev/null +++ b/metricbeat/module/aerospike/namespace/_meta/fields.yml @@ -0,0 +1,157 @@ +- name: namespace + type: group + description: > + namespace + fields: + - name: client + type: group + description: > + Client stats. + fields: + - name: delete + type: group + description: > + Client delete transactions stats. + fields: + - name: error + type: long + description: > + Number of client delete transactions that failed with an error. + - name: not_found + type: long + description: > + Number of client delete transactions that resulted in a not found. + - name: success + type: long + description: > + Number of successful client delete transactions. + - name: timeout + type: long + description: > + Number of client delete transactions that timed out. + - name: read + type: group + description: > + Client read transactions stats. + fields: + - name: error + type: long + description: > + Number of client read transaction errors. + - name: not_found + type: long + description: > + Number of client read transaction that resulted in not found. + - name: success + type: long + description: > + Number of successful client read transactions. + - name: timeout + type: long + description: > + Number of client read transaction that timed out. + - name: write + type: group + description: > + Client write transactions stats. + fields: + - name: error + type: long + description: > + Number of client write transactions that failed with an error. + - name: success + type: long + description: > + Number of successful client write transactions. + - name: timeout + type: long + description: > + Number of client write transactions that timed out. 
+ - name: device + type: group + description: > + Disk storage stats + fields: + - name: available.pct + type: scaled_float + format: percent + description: > + Measures the minimum contiguous disk space across all disks in a namespace. + - name: free.pct + type: scaled_float + format: percent + description: > + Percentage of disk capacity free for this namespace. + - name: total.bytes + type: long + format: bytes + description: > + Total bytes of disk space allocated to this namespace on this node. + - name: used.bytes + type: long + format: bytes + description: > + Total bytes of disk space used by this namespace on this node. + - name: hwm_breached + type: boolean + description: > + If true, Aerospike has breached 'high-water-[disk|memory]-pct' for this namespace. + - name: memory + type: group + description: > + Memory storage stats. + fields: + - name: free.pct + type: scaled_float + format: percent + description: > + Percentage of memory capacity free for this namespace on this node. + - name: used.data.bytes + type: long + format: bytes + description: > + Amount of memory occupied by data for this namespace on this node. + - name: used.index.bytes + type: long + format: bytes + description: > + Amount of memory occupied by the index for this namespace on this node. + - name: used.sindex.bytes + type: long + format: bytes + description: > + Amount of memory occupied by secondary indexes for this namespace on this node. + - name: used.total.bytes + type: long + format: bytes + description: > + Total bytes of memory used by this namespace on this node. + - name: name + type: keyword + description: > + Namespace name + - name: node.host + type: keyword + description: > + Node host + - name: node.name + type: keyword + description: > + Node name + - name: objects + type: group + description: > + Records stats. + fields: + - name: master + type: long + description: > + Number of records on this node which are active masters. 
+ - name: total + type: long + description: > + Number of records in this namespace for this node. + - name: stop_writes + type: boolean + description: > + If true this namespace is currently not allowing writes. diff --git a/metricbeat/module/aerospike/namespace/data.go b/metricbeat/module/aerospike/namespace/data.go new file mode 100644 index 00000000000..a404b348028 --- /dev/null +++ b/metricbeat/module/aerospike/namespace/data.go @@ -0,0 +1,67 @@ +package namespace + +import ( + s "github.com/elastic/beats/libbeat/common/schema" + c "github.com/elastic/beats/libbeat/common/schema/mapstrstr" +) + +var schema = s.Schema{ + "client": s.Object{ + "delete": s.Object{ + "error": c.Int("client_delete_error"), + "not_found": c.Int("client_delete_not_found"), + "success": c.Int("client_delete_success"), + "timeout": c.Int("client_delete_timeout"), + }, + "read": s.Object{ + "error": c.Int("client_read_error"), + "not_found": c.Int("client_read_not_found"), + "success": c.Int("client_read_success"), + "timeout": c.Int("client_read_timeout"), + }, + "write": s.Object{ + "error": c.Int("client_write_error"), + "success": c.Int("client_write_success"), + "timeout": c.Int("client_write_timeout"), + }, + }, + "device": s.Object{ + "available": s.Object{ + "pct": c.Float("device_available_pct", s.Optional), + }, + "free": s.Object{ + "pct": c.Float("device_free_pct", s.Optional), + }, + "used": s.Object{ + "bytes": c.Int("device_used_bytes", s.Optional), + }, + "total": s.Object{ + "bytes": c.Int("device_total_bytes", s.Optional), + }, + }, + "hwm_breached": c.Bool("hwm_breached"), + "memory": s.Object{ + "free": s.Object{ + "pct": c.Float("memory_free_pct"), + }, + "used": s.Object{ + "data": s.Object{ + "bytes": c.Int("memory_used_data_bytes"), + }, + "index": s.Object{ + "bytes": c.Int("memory_used_index_bytes"), + }, + "sindex": s.Object{ + "bytes": c.Int("memory_used_sindex_bytes"), + }, + "total": s.Object{ + "bytes": c.Int("memory_used_bytes"), + }, + }, + }, + 
"objects": s.Object{ + "master": c.Int("master_objects"), + "total": c.Int("objects"), + }, + "stop_writes": c.Bool("stop_writes"), +} diff --git a/metricbeat/module/aerospike/namespace/namespace.go b/metricbeat/module/aerospike/namespace/namespace.go new file mode 100644 index 00000000000..a39fb55c785 --- /dev/null +++ b/metricbeat/module/aerospike/namespace/namespace.go @@ -0,0 +1,93 @@ +package namespace + +import ( + "strings" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/aerospike" + "github.com/pkg/errors" + + as "github.com/aerospike/aerospike-client-go" +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("aerospike", "namespace", New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + client *as.Client +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. 
+func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + config := struct{}{} + + logp.Experimental("The aerospike namespace metricset is experimental") + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + host, err := aerospike.ParseHost(base.Host()) + if err != nil { + return nil, errors.Wrap(err, "Invalid host format, expected hostname:port") + } + + client, err := as.NewClientWithPolicyAndHost(as.NewClientPolicy(), host) + if err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + client: client, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + var events []common.MapStr + + for _, node := range m.client.GetNodes() { + info, err := as.RequestNodeInfo(node, "namespaces") + if err != nil { + logp.Err("Failed to retrieve namespaces from node %s", node.GetName()) + continue + } + + for _, namespace := range strings.Split(info["namespaces"], ";") { + info, err := as.RequestNodeInfo(node, "namespace/"+namespace) + if err != nil { + logp.Err("Failed to retrieve metrics for namespace %s from node %s", namespace, node.GetName()) + continue + } + + data, _ := schema.Apply(aerospike.ParseInfo(info["namespace/"+namespace])) + data["name"] = namespace + data["node"] = common.MapStr{ + "host": node.GetHost().String(), + "name": node.GetName(), + } + + events = append(events, data) + } + } + + return events, nil +} diff --git a/metricbeat/module/aerospike/namespace/namespace_integration_test.go b/metricbeat/module/aerospike/namespace/namespace_integration_test.go new file mode 100644 index 00000000000..954541fc40a --- /dev/null +++ b/metricbeat/module/aerospike/namespace/namespace_integration_test.go @@ -0,0 +1,26 @@ +// +build integration + +package namespace + +import ( + 
"testing" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/aerospike" +) + +func TestData(t *testing.T) { + f := mbtest.NewEventsFetcher(t, getConfig()) + err := mbtest.WriteEvents(f, t) + if err != nil { + t.Fatal("write", err) + } +} + +func getConfig() map[string]interface{} { + return map[string]interface{}{ + "module": "aerospike", + "metricsets": []string{"namespace"}, + "hosts": []string{aerospike.GetAerospikeEnvHost() + ":" + aerospike.GetAerospikeEnvPort()}, + } +} diff --git a/metricbeat/module/aerospike/testing.go b/metricbeat/module/aerospike/testing.go new file mode 100644 index 00000000000..e66df0e86aa --- /dev/null +++ b/metricbeat/module/aerospike/testing.go @@ -0,0 +1,31 @@ +package aerospike + +import ( + "os" +) + +// Helper functions for testing used in the aerospike MetricSets. + +// GetAerospikeEnvHost returns the hostname of the Aerospike server to use for +// testing. It reads the value from the AEROSPIKE_HOST environment variable and +// returns localhost if it is not set. +func GetAerospikeEnvHost() string { + host := os.Getenv("AEROSPIKE_HOST") + + if len(host) == 0 { + host = "localhost" + } + return host +} + +// GetAerospikeEnvPort returns the port of the Aerospike server to use for +// testing. It reads the value from the AEROSPIKE_PORT environment variable and +// returns 3000 if it is not set. 
+func GetAerospikeEnvPort() string { + port := os.Getenv("AEROSPIKE_PORT") + + if len(port) == 0 { + port = "3000" + } + return port +} diff --git a/metricbeat/tests/system/test_aerospike.py b/metricbeat/tests/system/test_aerospike.py new file mode 100644 index 00000000000..6106a6c6e1d --- /dev/null +++ b/metricbeat/tests/system/test_aerospike.py @@ -0,0 +1,37 @@ +import os +import metricbeat +import unittest +from nose.plugins.attrib import attr + +AEROSPIKE_FIELDS = metricbeat.COMMON_FIELDS + ["aerospike"] + + +class Test(metricbeat.BaseTest): + + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_namespace(self): + """ + aerospike namespace metricset test + """ + self.render_config_template(modules=[{ + "name": "aerospike", + "metricsets": ["namespace"], + "hosts": self.get_hosts(), + "period": "1s" + }]) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0) + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + output = self.read_output_json() + self.assertEqual(len(output), 1) + evt = output[0] + + self.assertItemsEqual(self.de_dot(AEROSPIKE_FIELDS), evt.keys()) + + self.assert_fields_are_documented(evt) + + def get_hosts(self): + return [os.getenv('AEROSPIKE_HOST', 'localhost') + ':' + + os.getenv('AEROSPIKE_PORT', '3000')] diff --git a/vendor/github.com/aerospike/aerospike-client-go/CHANGELOG.md b/vendor/github.com/aerospike/aerospike-client-go/CHANGELOG.md new file mode 100644 index 00000000000..9139a3027e0 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/CHANGELOG.md @@ -0,0 +1,913 @@ +# Change History + +## April 25 2017: v1.27.0 + + Feature, Performance improvements and bug fix release. + + * **New Features** + + - Added `BatchGetObjects` method. + - Added Exponential Backoff by introducing `BasePolicy.SleepMultiplier`. Only Values > 1.0 are effective. 
PR #192, thanks to [Venil Noronha](https://github.com/venilnoronha) + + * **Improvements** + + - Packer tries to see if it can use generic data types before using reflection. + - Operations, including CDTs do not allocate a buffer anymore, unless reused. + + * **Incompatible changes**: + - `BinName` and `BinValue` are not exported in `Operation` anymore. These fields shouldn't have been used anyway since `Operation`s used to cache their internal command. + + * **Fixes** + + - Documentation Fixes. Thanks to [Nassor Paulino da Silva](https://github.com/nassor) and [HArmen](https://github.com/alicebob) + + +## April 5 2017: v1.26.0 + + Feature, Performance improvements and bug fix release. + + * **New Features** + + - Predicate API is supported (for server v3.12+) + - Added `Truncate` method to quickly remove all data from namespaces or sets (for server v3.12+). + - Support `ScanPolicy.ServerSocketTimeout` (for server v3.12+). + - Support `ClientPolicy.IgnoreOtherSubnetAliases` to ignore hosts from other subnets. PR #182, thanks to [wedi-dev](https://github.com/wedi-dev) + + * **Improvements** + + - Added a lot of predefined generic slice and map types in `NewValue` method to avoid hitting reflection as much as possible. + - Fix `go vet` complaints. + + * **Fixes** + + - Allow streaming commands (scan/query/aggregation) to retry unless the error occurs during parsing of the results. Fixes issue #187 + - Use `net.JoinHostPort` to concatinate host and port values instead of doing it directly. Fixes some issues in IPv6 connection strings. + - Improved initial Tend run. + - Fixes `cluster-name` checking bug. + +## March 8 2017: v1.25.1 + + Hot fix release. Updating the client is recommended. + + * **Fixes** + + - Fixed an issue where errors in Scan/Query unmarshalling would be duplicated and could cause a deadlock. + +## February 28 2017: v1.25.0 + + Performance improvements and fix release. 
+ + * **Improvements** + + - Check tend duration and compare it to tend interval, and warn the user if tend takes longer than tend interval. + - Seed the cluster concurrently, and return as soon as any of the seeds is validated. + - Tend the cluster concurrently. Allows use of very big clusters with no delay. + - Partitions the connection queue to avoid contention. + - Cluster partition map is merged from all node fragments and updated only once per tend to reduce contention to absolute minimum. + + * **Fixes** + + - Fixed an issue where a valid but unreachable seed could timeout and stall connecting and tending the cluster.. + - Fix result code comments. + +## January 11 2017: v1.24.0 + + Minor feature and fix release. + + * **New Features** + + - TLS/SSL connections are now officially supported. + - Added Role/Privilege API. + + * **Improvements** + + - Return a client-side error when no ops are passed to the operate command. + - Export error attribute in `NodeError` + - Do not attempt to refresh peers if it is not supported by the nodes. + + * **Fixes** + + - Use namespace default-ttl for tests instead of assuming 30d + - Always drain scan connections after parsing the records. + - Fix panic in GetObject() if all bins in result is nil. PR #172, thanks to [Hamper](https://github.com/hamper) + - Fix WritePolicy usage with UDF. PR #174, thanks to [Bertrand Paquet](https://github.com/bpaquet) + - Close connection right when it has an io error and don't wait for the caller. + +## December 20 2016 : v1.23.0 + + Minor feature and fix release. + + * **New Features** + + - Exposes the internal `client.Cluster` object to the users. + - Added New API for high-performance complex data type packing, and removed the old API. + + * **Improvements** + + - Only update the partition map if the partition generatio has changed. + - Use tend connection for user management commands. + - Marks LargeList as deprecated. Use CDT methods instead. 
+ - Always validate the message header to avoid reading the remainder of other command buffers. + - Removes GeoJson from key helper. + - Improves tend algorthm to allow complete disconnection from the cluster if none of the clusters are accessible. + - `PutObject` method will now accept objects as well. PR #156, thanks to [Sarath S Pillai](https://github.com/sarathsp06) + + * **Fixes** + + - Do not attemp to add a node which were unaccessible to avoid panic. + - Fix invalid connectionCount. PR #168, thanks to [Jun Kimura](https://github.com/bluele) + - Fixes minor bug that didn't return the error on reading from the connection during scans. + +## November 29 2016 : v1.22.0 + + Hot fix release. Please upgrade if you have been using other aerospike clients with your database parallel to Go. + + * **Fixes** + + - Fixes an issue where short strings in Lists and Maps wouldn't unpack correctly. Resolves #161. + +## November 16 2016 : v1.21.0 + + Minor fix release. + + * **New Features** + + - Added new constants for expiration in `WritePolicy`: `TTLServerDefault`, `TTLDontExpire`, `TTLDontUpdate` + + * **Improvements** + + - Corrects typos in the code. PR #142, thanks to [Muyiwa Olurin ](https://github.com/muyiwaolurin) + - Use the tend connection for `RequestInfo` commands. + + * **Fixes** + + - Fixes an issue where TTL values were calcualted wrongly when they were set not to expire. + - Fixes an issue where `PutObjects` would marshal `[]byte` to `List` in database. PR #152, thanks to [blide](https://github.com/blide) + - Fixes an issue where `Recordset` could leak goroutines. PR #153, thanks to [Deepak Prabhakara](https://github.com/deepakprabhakara) + +## October 25 2016 : v1.20.0 + + Major improvements release. There has been major changes in the library. Please test rigorously before upgrading to the new version. + + * **New Features** + + - Let user define the desired tag for bin names in structs using `SetAerospikeTag` function. 
+ - Added `as_performance` build tag to avoid including the slow convenience API which uses reflections in the client code. + To use this feature, you should include -tags="as_performance" when building your project. + + *NOTICE*: Keep in mind that your code may not compile using this flag. That is by design. + + * **Improvements** + + - Added special packer for map[string]interface{} in `NewValue` method. + - Avoid allocating memory for Map and List values. + - Allocate commands on the stack to avoid heap allcations. + - Avoid allocating memory for `packer`. + - Avoid Allocating memory in computeHash for keys. + - Avoid allocating memory in Ripe160MD digest. + - Removed BufferPool and moved buffers to `Connection` objects to remove lock contention. + - Added `ListIter` and `MapIter` interfaces to support passing Maps and Lists to the client without using reflection. + +## October 14 2016 : v1.19.0 + + Major feature and improvement release. + + * **New Features** + + * Support TLS secured connections. (Feature will be supported in coming server releases.) + + * Support IPv6 protocol. Supported by Aerospike Server 3.10+. + + * Support `cluster-name` verification. Supported by Aerospike Server 3.10+. + + * Support new peers info protocol. Supported by Aerospike Server 3.10+. + + * **Improvements** + + * Will retry the operation even when reading from the buffer. Set `Policy.MaxRetries = 0` to avoid this behavior. PR #143, thanks to [Hector Jusforgues](https://github.com/hectorj) + + * Much improved cluster management algorithm. Will now handle the case where multiple nodes go down simultaneously, still protecting against split brain rogue nodes. + + * **Fixes** + + * Try all alias IPs in node validator. Resolves #144. + + * Updated job status check for execute tasks. + +## August 19 2016 : v1.18.0 + + Minor improvements release. + + * **New Features** + + * Support 'Durable Deletes' for the next version of Aerospike Server Enterprise. 
+ + * **Improvements** + + * Don't run tests for features that are not supported by the server. + + * Added new server error codes. + + +## July 27 2016 : v1.17.1 + + Minor improvements release. + + * **Improvements** + + * Add `TaskId()` method for `Recordset`. + + * Cleanup indexes after test cases. + + * Keep connections on recoverable server errors. + + * Return the error on unexpected keys in `BatchCommandGet/Header`. + + * Use the same client object in tests and support using replicas on travis. + +## July 19 2016 : v1.17.0 + + Major feature and improvement release. + + * **New Features** + + * Client now supports distributing reads from Replicas using `ClientPolicy.RequestProleReplicas` and `Policy.ReplicaPolicy` + + * **Improvements** + + * `Cluster.GetConnection` will now retry to acquire a connection until timeout. + + * `Client.DropIndex` method now blocks until all nodes report the index is dropped. + + * Async tasks like `CreateIndex` will retry a few times before deciding a non-existing job means it has finished. + + * Don't use math.MaxInt64, it breaks 32-bit builds. PR #139, thanks to [Cameron Sparr](https://github.com/sparrc) + + * **Fixes** + + * Maps with 0 elements will automatically shortcut to unordered empty maps. + + * Return the error in BatchCommandGet on parse error. + +## June 28 2016 : v1.16.3 + + Major bugfix release. Update recommended. + + * **Improvements** + + * Skip LDT tests if LDT is not enabled. + + * Returns last error after all retry attempts to run a command are exhausted. + + * Reserves a connection for tend operation to avoid dropping a node when high load prevents acquiring a proper connection. + + * Added Finalizers to `Client` and `Recordset`. Both will be automatically closed by the GC. + + * **Fixes** + + * Fixes an issue where `services-alternate` wasn't used in `Node.addFriends()` when instructed so in the policy. 
+ + * Fixes an issue where object metadata wasn't cached if `QueryObjects` was called before `PutObject`. + + * Fixes an issue where idle connections were not dropped. + + * Fixes an issue where requested buffer sizes were not guarded against negative numbers. + +## June 7 2016 : v1.16.2 + + Minor bugfix release. + + * **Fixes** + + * Fixes an issue where empty unordered maps were confused with CDT maps. + +## June 6 2016 : v1.16.1 + + Minor bugfix release. + + * **Fixes** + + * Fixes an issue where complex maps and lists weren't unmarshalled correctly in `GetObject` method. + +## June 2 2016 : v1.16 + + Major feature and improvements release. + + > NOTICE: Due to the relatively extensive code overhaul, upgrade with caution. + + * **New Features** + + * Added CDT Ordered Map API. (Requires server v3.8.3+) + + * **Improvements** + + * Removed mutexes from `Cluster` and `Node` code. + + * Improved code quality using various linters. + +## May 27 2016 : v1.15 + + Minor fixes and improvements release. + + * **Fixes** + + * Fixed an issue where unmarshalling embedded structs and pointers didn't work properly if they were tagged. + +## May 16 2016 : v1.14 + + Minor fixes and improvements release. + + * **Fixes** + + * Fixed an issue in which go-routines were leaked in `Results()` method of `Recordset` on cancellation. Based on PR #128, thanks to [Noel Cower](https://github.com/nilium) + + * Fixed issues regarding leaked goroutines in `Cluster.WaitTillStablized()`, `Cluster.MigrationInProgress()`, and `Cluster.WaitUntillMigrationIsFinished()` methods. PR #126, thanks to [Anton](https://github.com/yiiton) + + * **Improvements** + + * Improved cluster `tend()` logic. + + * Added `Recordset.Read()` method. + + * Minor fixes in docs and code formatting. Thanks to [Andrew Murray](https://github.com/radarhere) and [Erik Dubbelboer](https://github.com/erikdubbelboer) + +## April 1 2016 : v1.13 + + Minor features and improvements release. 
+ + * **New Features** + + * Added `NewGeoWithinRegionForCollectionFilter`, `NewGeoRegionsContainingPointForCollectionFilter`, `NewGeoWithinRadiusForCollectionFilter` for queries on collection bins. + + * **Fixes** + + * Fixed an issue in which bounded byte arrays were silently being dropped as map keys. + + * **Improvements** + + * Removed and fixed unused assignments and variables. + + * Fixed typos in the comments. + + * Minor changes and formatting. PR #124, thanks to [Harmen](https://github.com/alicebob) + +## March 8 2016 : v1.12 + + Minor features and improvements release. + + * **New Features** + + * Support Metadata in struct tags to fetch TTL and Generation via `GetObject`. + Notice: Metadata attributes in a struct are considered transient, and won't be persisted. + + Example: + ```go + type SomeStruct struct { + TTL uint32 `asm:"ttl"` // record time-to-live in seconds + Gen uint32 `asm:"gen"` // record generation + A int + Self *SomeStruct + } + + key, _ := as.NewKey("ns", "set", value) + err := client.PutObject(nil, key, obj) + // handle error here + + rObj := &OtherStruct{} + err = client.GetObject(nil, key, rObj) + ``` + + * GeoJSON support in Lists and Maps + + * **Improvements** + + * Use `ClientPolicy.timeout` for connection timeout when refreshing nodes + + * Added new server error codes + + * Protect RNG pool against low-precision clocks during init + + * Better error message distinguishing between timeout because of reaching deadline and exceeding maximum retries + + * **Fixes** + + * Fixed object mapping cache for anonymous structs. PR #115, thanks to [Moshe Revah](https://github.com/zippoxer) + + * Fixed an issue where `Execute()` method wasn't observing the `SendKey` flag in Policy. + +## February 9 2016 : v1.11 + + Minor features and improvements release. + + * **New Features** + + * Can now use `services-alternate` for cluster tend. 
+ + * New CDT List API: `ListGetRangeFromOp`, `ListRemoveRangeFromOp`, `ListPopRangeFromOp` + + * **Improvements** + + * Improves marshalling of data types into and out of the Lua library and avoids marshalling values before they are needed. + + * Returns error for having more than one Filter on client-side to avoid confusion. + + * Increases default `ClientPolicy.Timeout` and return a meaningful error message when the client is not fully connected to the cluster after `waitTillStabilized` call + +## January 13 2016 : v1.10 + + Major release. Adds Aggregation. + + * **New Features** + + * Added `client.QueryAggregate` method. + + * For examples regarding how to use this feature, look at the examples directory. + + * You can find more documentation regarding the [Aggregation Feature on Aerospike Website](http://www.aerospike.com/docs/guide/aggregation.html) + + * **Improvements** + + * Improve Query/Scan performance by reading from the socket in bigger chunks + +## December 14 2015 : v1.9 + + Major release. Adds new features. + + * **New Features** + + * Added CDT List operations. + + * Added `NewGeoWithinRadiusFilter` filter for queries. + + * **Changes** + + * Renamed `NewGeoPointsWithinRegionFilter` to `NewGeoWithinRegionFilter` + +## December 1 2015 : v1.8 + + Major release. Adds new features and fixes important bugs. + + * **New Features** + + * Added `ScanAllObjects`, `ScanNodeObjects`, `QueryObjects` and `QueryNodeObjects` to the client, to facilitate automatic unmarshalling of data similar to `GetObject`. + + * NOTICE: This feature and its API are experimental, and may change in the future. Please test your code throughly, and provide feedback via Github. + + * Added `ScanPolicy.IncludeLDT` option (Usable with yet to be released server v 3.7.0) + + * Added `LargeList.Exist` method. + + * **Improvements** + + * Makes Generation and Expiration values consistent for WritePolicy and Record. + + * NOTICE! 
BREAKING CHANGE: Types of `Record.Generation` and `Record.Expiration`, and also `WritePolicy.Generation` and `WritePolicy.Expiration` have changed, and may require casting in older code. + + * Refactor tools/asinfo to be more idiomatic Go. PR #86, thanks to [Tyler Gibbons](https://github.com/Kavec) + + * Many documentation fixes thanks to [Charl Matthee](https://github.com/charl) and [Tyler Gibbons](https://github.com/Kavec) + + * **Fixes** + + * Changed the `KeepConnection` logic from black-list to white-list, to drop all + + * Fix RemoveNodesCopy logic error. + + * Add missing send on recordset Error channel. PR #99, thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + + * Fix skipping of errors/records in (*recordset).Results() select after cancellation. PR #99, thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + +## October 16 2015 : v1.7 + + Major release. Adds new features and fixes important bugs. + + * **New Features** + + * Added support for Geo spatial queries. + + * Added support for creating indexes on List and Map bins, and querying them. + + * Added support for native floating point values. + + * Added `ClientPolicy.IpMap` to use IP translation for alias recognition. PR #81, Thanks to [Christopher Guiney](https://github.com/chrisguiney) + + * **Improvements** + + * Cosmetic change to improve code consistency for `PackLong` in `packer.go`. PR #78, Thanks to [Erik Dubbelboer](https://github.com/ErikDubbelboer) + + * **Fixes** + + * Fixes an issue when the info->services string was malformed and caused the client to panic. + + * Fixes an issue with unmarshalling maps of type map[ANY]struct{} into embedded structs. + + * Fixes issue with unmarshalling maps of type map[ANY]struct{} into embedded structs. + + * Fixes an issue with bound checking. PR #85, Thanks to [Tait Clarridge](https://github.com/oldmantaiter) + + * Fixes a few typos in the docs. 
PR #76, Thanks to [Charl Matthee](https://github.com/charl) + +## August 2015 : v1.6.5 + + Minor maintenance release. + + * **Improvements** + + * Export `MaxBufferSize` to allow tweaking of maximum buffer size allowed to read a record. If a record is bigger than this size (e.g: A lot of LDT elements in scan), this setting will allow to tweak the buffer size. + +## July 16 2015 : v1.6.4 + + Hot fix release. + + * **Fixes** + + * Fix panic when a scan/query fails and the connection is not dropped. + +## July 9 2015 : v1.6.3 + + Minor fix release. + + * **Improvements** + + * Improved documentation. PR #64 and #68. Thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + + * **Fixes** + + * Fix a bunch of golint notices. PR #69, Thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + + * Connection.Read() total bytes count on error. PR #71, Thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + + * Fixed a race condition on objectMappings map. PR #72, Thanks to [Geert-Johan Riemer](https://github.com/GeertJohan) + + * Fixed a few uint -> int conversions. + +## June 11 2015 : v1.6.2 + + Minor fix release. + + * **Improvements** + + * Improved documentation. Replaced all old API references regarding Recordset/Query/Scan to newer, more elegant API. + + * **Fixes** + + * Fixed an issue where erroring out on Scan would result in a panic. + + * Fixed an issue where `Statement.TaskId` would be negative. Converted `Statement.TaskId` to `uint64` + +## June 9 2015 : v1.6.1 + + Minor fix release. + + * **Fixes** + + * Fixed an issue where marshaller wouldn't marshal some embedded structs. + + * Fixed an issue where querying/scanning empty sets wouldn't drain the socket before return. + +## May 30 2015 : v1.6.0 + + There's an important performance regression bug fix in this release. We recommend everyone to upgrade. + + * **New Features** + + * Added New LargeList API. + + * NOTICE! 
BREAKING CHANGE: New LargeList API on the Go Client uses the New API defined on newer server versions. As such, it has changed some signatures in LargeList. + + * **Fixes** + + * Fixed an issue where connections were not put back to the pool on some non-critical errors. + + * Fixed an issue where Object Unmarshaller wouldn't extend a slice. + + * Decode RegisterUDF() error message from base64 + + * Fixed invalid connection handling on node connections (thanks to @rndive) + +## May 15 2015 : v1.5.2 + + Hotfix release. + + * **Fixes** + + * Fixed a branch-merge mistake regarding error handling during connection authentication. + +## May 15 2015 : v1.5.1 + + Major maintenance release. + + NOTICE: All LDTs on server other than LLIST have been deprecated, and will be removed in the future. As such, all API regarding those features are considered deprecated and will be removed in tandem. + + * **Improvements** + + * Introduces `ClientPolicy.IdleTimeout` to close stale connections to the server. Thanks to Mário Freitas (@imkira). PR #57 + + * Use type alias instead of struct for NullValue. + + * Removed workaround regarding filtering bin names on the client for `BatchGet`. Issue #60 + + * **Fixes** + + * Fixed a few race conditions. + + * Fixed #58 regarding race condition accessing `Cluster.password`. + + * Fixed minor bugs regarding handling of nulls in structs for `GetObj()` and `PutObj()`. + + * Fixed a bug regarding setting TaskIds on the client. + + * ** Other Changes ** + + * Removed deprecated `ReplaceRoles()` method. + + * Removed deprecated `SetCapacity()` and `GetCapacity()` methods for LDTs. + +## April 13 2015 : v1.5.0 + + This release includes potential BREAKING CHANGES. + + * **New Features** + + * Introduces `ClientPolicy.LimitConnectionsToQueueSize`. If set to true, the client won't attempt to create new connections to the node if the total number of pooled connections to the node is equal or more than the pool size. 
The client will retry to poll a connection from the queue until a timeout occurs. If no timeout is set, it will only retry for ten times. + + * **Improvements** + + * BREAKING CHANGE: | + Uses type aliases instead of structs in several XXXValue methods. This removes a memory allocation per `Value` usage. + Since every `Put` operation uses at least one value object, this has the potential to improve application performance. + Since the signatures of several `NewXXXValue` methods have changed, this might break some existing code if you have used the value objects directly. + + * Improved `Logger` so that it will accept a generalized `Logger` interface. Any Logger with a `Printf(format string, values ...interface{})` method can be used. Examples include Logrus. + + * Improved `Client.BatchGet()` performance. + + * **Fixes** + + * Bin names were ignored in BatchCommands. + + * `BatchCommandGet.parseRecord()` returned wrong values when `BinNames` was empty but not nil. + +## March 31 2015 : v1.4.2 + + Maintenance release. + + * **Improvements** + + * Replace channel-based queue system with a lock-based algorithm. + * Marshaller now supports arrays of arbitrary types. + * `Client.GetObject()` now returns an error when the object is not found. + * Partition calculation uses a trick that is twice as fast. + + * **Fixes** + + * Unpacking BLOBs resulted in returning references to pooled buffers. Now copies are returned. + +## March 12 2015 : v1.4.1 + + This is a minor release to help improve the compatibility of the client on Mac OS, and to make cross compilation easier. + + * **Improvements** + + * Node validator won't call net.HostLookup if an IP is passed as a seed to it. + +## Feb 17 2015 : v1.4.0 + + This is a major release, and makes using the client much easier to develop applications. + + * **New Features** + + * Added Marshalling Support for Put and Get operations. Refer to [Marshalling Test](client_object_test.go) to see how to take advantage. 
+ Same functionality for other APIs will follow soon. + Example: + ```go + type SomeStruct struct { + A int `as:"a"` // alias the field to a + Self *SomeStruct `as:"-"` // will not persist the field + } + + type OtherStruct struct { + i interface{} + OtherObject *OtherStruct + } + + obj := &OtherStruct { + i: 15, + OtherObject: OtherStruct {A: 18}, + } + + key, _ := as.NewKey("ns", "set", value) + err := client.PutObject(nil, key, obj) + // handle error here + + rObj := &OtherStruct{} + err = client.GetObject(nil, key, rObj) + ``` + + * Added `Recordset.Results()`. Consumers of a recordset do not have to implement a select anymore. Instead of: + ```go + recordset, err := client.ScanAll(...) + L: + for { + select { + case r := <-recordset.Record: + if r == nil { + break L + } + // process record here + case e := <-recordset.Errors: + // handle error here + } + } + ``` + + one should only range on `recordset.Results()`: + + ```go + recordset, err := client.ScanAll(...) + for res := range recordset.Results() { + if res.Err != nil { + // handle error here + } else { + // process record here + fmt.Println(res.Record.Bins) + } + } + ``` + + Use of the old pattern is discouraged and deprecated, and direct access to recordset.Records and recordset.Errors will be removed in a future release. + + * **Improvements** + + * Custom Types are now allowed as bin values. + +## Jan 26 2015 : v1.3.1 + + * **Improvements** + + * Removed dependency on `unsafe` package. + +## Jan 20 2015 : v1.3.0 + + * **Breaking Changes** + + * Removed `Record.Duplicates` and `GenerationPolicy/DUPLICATE` + + * **New Features** + + * Added Security Features: Please consult [Security Docs](https://www.aerospike.com/docs/guide/security.html) on Aerospike website. 
+ + * `ClientPolicy.User`, `ClientPolicy.Password` + * `Client.CreateUser()`, `Client.DropUser()`, `Client.ChangePassword()` + * `Client.GrantRoles()`, `Client.RevokeRoles()`, `Client.ReplaceRoles()` + * `Client.QueryUser()`, `Client.QueryUsers` + + * Added `Client.QueryNode()` + + * Added `ClientPolicy.TendInterval` + + * **Improvements** + + * Cleaned up Scan/Query/Recordset concurrent code + + * **Fixes** + + * Fixed a bug in `tools/cli/cli.go`. + + * Fixed a bug when `GetHeaderOp()` would always translate into `GetOp()` + +## Dec 29 2014: v1.2.0 + + * **New Features** + + * Added `NewKeyWithDigest()` method. You can now create keys with custom digests, or only using digests without + knowing the original value. (Useful when you are getting back results with Query and Scan) + +## Dec 22 2014 + + * **New Features** + + * Added `ConsistencyLevel` to `BasePolicy`. + + * Added `CommitLevel` to `WritePolicy`. + + * Added `LargeList.Range` and `LargeList.RangeThenFilter` methods. + + * Added `LargeMap.Exists` method. + + * **Improvements** + + * We use a pooled XORShift RNG to produce random numbers in the client. It is FAST. + +## Dec 19 2014 + + * **Fixes** + + * `Record.Expiration` wasn't converted to TTL values on `Client.BatchGet`, `Client.Scan` and `Client.Query`. + +## Dec 10 2014 + + * **Fixes**: + + * Fixed issue when the size of key field would not be estimated correctly when WritePolicy.SendKey was set. + +## Nov 27 2014 + + Major Performance Enhancements. Minor new features and fixes. + + * **Improvements** + + * Go client is much faster and more memory efficient now. + In some workloads, it competes and wins against C and Java clients. + + * Complex objects are now de/serialized much faster. + + * **New Features** + + * Added Default Policies for Client object. + Instead of creating a new policy when the passed policy is nil, default policies will be used. 
+ +## Nov 24 2014 + + * **Fixes**: + + * Fixed issue when WritePolicy.SendKey = true was not respected in Touch() and Operate() + +## Nov 22 2014 + + Hotfix in unpacker. Update strongly recommended for everyone using Complex objects, LDTs and UDFs. + + * **Fixes**: + + * When Blob, ByteArray or String size has a sign bit set, unpacker reads it wrong. + Note: This bug only affects unpacking of these objects. Packing was unaffected, and data in the database is valid. + +## Nov 2 2014 + + Minor, but very important fix. + + * **Fixes**: + + * Node selection in partition map was flawed on first refresh. + + * **Incompatible changes**: + + * `Expiration` and `Generation` in `WritePolicy` are now `int32` + * `TaskId` in `Statement` is now always set in the client, and is `int64` + + * **New Features**: + + * float32, float64 and bool are now supported in map and array types + +## Oct 15 2014 (Beta 2) + + * **Hot fix**: + + * Fixed pack/unpack for uint64 + +## Aug 20 2014 (Beta 1) + + Major changes and improvements. 
+ + * **New Features**: + + * Added client.Query() + * Added client.ScanNode()/All() + * Added client.Operate() + * Added client.CreateIndex() + * Added client.DropIndex() + * Added client.RegisterUDF() + * Added client.RegisterUDFFromFile() + * Added client.Execute() + * Added client.ExecuteUDF() + * Added client.BatchGet() + * Added client.BatchGetHeader() + * Added client.BatchExists() + * Added LDT implementation + * Added `Node` and `Key` references to the Record + + * **Changes**: + + * Many minor and major bug fixes + * Potentially breaking change: Reduced Undocumented API surface + * Fixed a few places where error results were not checked + * Breaking Change: Convert Key.namespace & Key.setName from pointer to string; affects Key API + * Renamed all `this` receivers to appropriate names + * Major performance improvements (~2X improvements in speed and memory consumption): + * better memory management for commands; won't allocate if capacity is big enough + * better hash management in key; avoids two redundant memory allocs + * use a buffer pool to reduce GC load + * fine-grained, customizable and deterministic buffer pool implementation for command + + * Optimizations for Key & Digest + * changed digest implementation, removed an allocation + * Added RIPEMD160 hash files from crypto to lib + * pool hash objects + + * Various Benchmark tool improvements + * now profileable using localhost:6060 + * minor bug fixes + +## Jul 26 2014 (Alpha) + + * Initial Release. diff --git a/vendor/github.com/aerospike/aerospike-client-go/LICENSE b/vendor/github.com/aerospike/aerospike-client-go/LICENSE new file mode 100644 index 00000000000..1006ac103fa --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014-2016 Aerospike, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aerospike/aerospike-client-go/README.md b/vendor/github.com/aerospike/aerospike-client-go/README.md new file mode 100644 index 00000000000..cdb59c6e958 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/README.md @@ -0,0 +1,160 @@ +# Aerospike Go Client + +[![Aerospike Client Go](https://goreportcard.com/badge/github.com/aerospike/aerospike-client-go)](https://goreportcard.com/report/github.com/aerospike/aerospike-client-go) + +An Aerospike library for Go. + +This library is compatible with Go 1.5+ and supports the following operating systems: Linux, Mac OS X (Windows builds are possible, but untested) + +Please refer to [`CHANGELOG.md`](CHANGELOG.md) if you encounter breaking changes. 
+ +- [Usage](#Usage) +- [Prerequisites](#Prerequisites) +- [Installation](#Installation) +- [Tweaking Performance](#Performance) +- [Benchmarks](#Benchmarks) +- [API Documentation](#API-Documentation) +- [Tests](#Tests) +- [Examples](#Examples) + - [Tools](#Tools) + + +## Usage: + +The following is a very simple example of CRUD operations in an Aerospike database. + +```go +package main + +import ( + "fmt" + + . "github.com/aerospike/aerospike-client-go" +) + +func panicOnError(err error) { + if err != nil { + panic(err) + } +} + +func main() { + // define a client to connect to + client, err := NewClient("127.0.0.1", 3000) + panicOnError(err) + + key, err := NewKey("test", "aerospike", "key") + panicOnError(err) + + // define some bins with data + bins := BinMap{ + "bin1": 42, + "bin2": "An elephant is a mouse with an operating system", + "bin3": []interface{}{"Go", 2009}, + } + + // write the bins + err = client.Put(nil, key, bins) + panicOnError(err) + + // read it back! + rec, err := client.Get(nil, key) + panicOnError(err) + + fmt.Printf("%#v\n", *rec) + + // delete the key, and check if key exists + existed, err := client.Delete(nil, key) + panicOnError(err) + fmt.Printf("Record existed before delete? %v\n", existed) +} +``` + +More examples illustrating the use of the API are located in the +[`examples`](examples) directory. + +Details about the API are available in the [`docs`](docs) directory. + + +## Prerequisites + +[Go](http://golang.org) version v1.5+ is required. + +To install the latest stable version of Go, visit +[http://golang.org/dl/](http://golang.org/dl/) + + +Aerospike Go client implements the wire protocol, and does not depend on the C client. +It is goroutine friendly, and works asynchronously. + +Supported operating systems: + +- Major Linux distributions (Ubuntu, Debian, Red Hat) +- Mac OS X +- Windows (untested) + + +## Installation: + +1. 
Install Go 1.5+ and setup your environment as [Documented](http://golang.org/doc/code.html#GOPATH) here. +2. Get the client in your ```GOPATH``` : ```go get github.com/aerospike/aerospike-client-go``` + * To update the client library: ```go get -u github.com/aerospike/aerospike-client-go``` + +Using [gopkg.in](https://gopkg.in/) is also supported: `go get -u gopkg.in/aerospike/aerospike-client-go.v1` + +### Some Hints: + + * To run a go program directly: ```go run ``` + * to build: ```go build -o ``` + * example: ```go build -o benchmark tools/benchmark/benchmark.go``` + + +## Performance Tweaking + +We are bending all efforts to improve the client's performance. In our reference benchmarks, Go client performs almost as good as the C client. + +To read about performance variables, please refer to [`docs/performance.md`](docs/performance.md) + + +## Tests + +This library is packaged with a number of tests. Tests require Ginkgo and Gomega library. + +Before running the tests, you need to update the dependencies: + + $ go get . + +To run all the test cases with race detection: + + $ ginkgo -r -race + + + +## Examples + +A variety of example applications are provided in the [`examples`](examples) directory. + + +### Tools + +A variety of clones of original tools are provided in the [`tools`](tools) directory. +They show how to use more advanced features of the library to reimplement the same functionality in a more concise way. + + +## Benchmarks + +Benchmark utility is provided in the [`tools/benchmark`](tools/benchmark) directory. +See the [`tools/benchmark/README.md`](tools/benchmark/README.md) for details. + + +## API Documentation + +API documentation is available in the [`docs`](docs/README.md) directory. + +## License + +The Aerospike Go Client is made available under the terms of the Apache License, Version 2, as stated in the file `LICENSE`. 
+ +Individual files may be made available under their own specific license, +all compatible with Apache License, Version 2. Please see individual files for details. + diff --git a/vendor/github.com/aerospike/aerospike-client-go/admin_command.go b/vendor/github.com/aerospike/aerospike-client-go/admin_command.go new file mode 100644 index 00000000000..7a9e645ab65 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/admin_command.go @@ -0,0 +1,651 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use acmd file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/aerospike/aerospike-client-go/pkg/bcrypt" + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +const ( + // Commands + _AUTHENTICATE byte = 0 + _CREATE_USER byte = 1 + _DROP_USER byte = 2 + _SET_PASSWORD byte = 3 + _CHANGE_PASSWORD byte = 4 + _GRANT_ROLES byte = 5 + _REVOKE_ROLES byte = 6 + _QUERY_USERS byte = 9 + _CREATE_ROLE byte = 10 + _DROP_ROLE byte = 11 + _GRANT_PRIVILEGES byte = 12 + _REVOKE_PRIVILEGES byte = 13 + _QUERY_ROLES byte = 16 + + // Field IDs + _USER byte = 0 + _PASSWORD byte = 1 + _OLD_PASSWORD byte = 2 + _CREDENTIAL byte = 3 + _ROLES byte = 10 + _ROLE byte = 11 + _PRIVILEGES byte = 12 + + // Misc + _MSG_VERSION int64 = 0 + _MSG_TYPE int64 = 2 + + _HEADER_SIZE int = 24 + _HEADER_REMAINING int = 16 + _RESULT_CODE int = 9 + _QUERY_END int = 50 +) + +type adminCommand struct { + dataBuffer []byte + dataOffset int +} + +func newAdminCommand(buf []byte) *adminCommand { + if buf == nil { + buf = make([]byte, 10*1024) + } + return &adminCommand{ + dataBuffer: buf, + dataOffset: 8, + } +} + +func (acmd *adminCommand) authenticate(conn *Connection, user string, password []byte) error { + + acmd.setAuthenticate(user, password) + if _, err := conn.Write(acmd.dataBuffer[:acmd.dataOffset]); err != nil { + return err + } + + if _, err := conn.Read(acmd.dataBuffer, _HEADER_SIZE); err != nil { + return err + } + + result := acmd.dataBuffer[_RESULT_CODE] + if result != 0 { + return NewAerospikeError(ResultCode(result), "Authentication failed") + } + + // bufPool.Put(acmd.dataBuffer) + + return nil +} + +func (acmd *adminCommand) setAuthenticate(user string, password []byte) int { + acmd.writeHeader(_AUTHENTICATE, 2) + acmd.writeFieldStr(_USER, user) + acmd.writeFieldBytes(_CREDENTIAL, password) + acmd.writeSize() + + return acmd.dataOffset +} + +func (acmd *adminCommand) createUser(cluster *Cluster, policy *AdminPolicy, user string, password []byte, roles []string) error { + acmd.writeHeader(_CREATE_USER, 3) + acmd.writeFieldStr(_USER, 
user) + acmd.writeFieldBytes(_PASSWORD, password) + acmd.writeRoles(roles) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) dropUser(cluster *Cluster, policy *AdminPolicy, user string) error { + acmd.writeHeader(_DROP_USER, 1) + acmd.writeFieldStr(_USER, user) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) setPassword(cluster *Cluster, policy *AdminPolicy, user string, password []byte) error { + acmd.writeHeader(_SET_PASSWORD, 2) + acmd.writeFieldStr(_USER, user) + acmd.writeFieldBytes(_PASSWORD, password) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) changePassword(cluster *Cluster, policy *AdminPolicy, user string, password []byte) error { + acmd.writeHeader(_CHANGE_PASSWORD, 3) + acmd.writeFieldStr(_USER, user) + acmd.writeFieldBytes(_OLD_PASSWORD, cluster.Password()) + acmd.writeFieldBytes(_PASSWORD, password) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) grantRoles(cluster *Cluster, policy *AdminPolicy, user string, roles []string) error { + acmd.writeHeader(_GRANT_ROLES, 2) + acmd.writeFieldStr(_USER, user) + acmd.writeRoles(roles) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) revokeRoles(cluster *Cluster, policy *AdminPolicy, user string, roles []string) error { + acmd.writeHeader(_REVOKE_ROLES, 2) + acmd.writeFieldStr(_USER, user) + acmd.writeRoles(roles) + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) createRole(cluster *Cluster, policy *AdminPolicy, roleName string, privileges []Privilege) error { + acmd.writeHeader(_CREATE_ROLE, 2) + acmd.writeFieldStr(_ROLE, roleName) + if err := acmd.writePrivileges(privileges); err != nil { + return err + } + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) dropRole(cluster *Cluster, policy *AdminPolicy, roleName string) error { + acmd.writeHeader(_DROP_ROLE, 1) + acmd.writeFieldStr(_ROLE, roleName) + return 
acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) grantPrivileges(cluster *Cluster, policy *AdminPolicy, roleName string, privileges []Privilege) error { + acmd.writeHeader(_GRANT_PRIVILEGES, 2) + acmd.writeFieldStr(_ROLE, roleName) + if err := acmd.writePrivileges(privileges); err != nil { + return err + } + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) revokePrivileges(cluster *Cluster, policy *AdminPolicy, roleName string, privileges []Privilege) error { + acmd.writeHeader(_REVOKE_PRIVILEGES, 2) + acmd.writeFieldStr(_ROLE, roleName) + if err := acmd.writePrivileges(privileges); err != nil { + return err + } + return acmd.executeCommand(cluster, policy) +} + +func (acmd *adminCommand) queryUser(cluster *Cluster, policy *AdminPolicy, user string) (*UserRoles, error) { + // TODO: Remove the workaround in the future + time.Sleep(time.Millisecond * 10) + // defer bufPool.Put(acmd.dataBuffer) + + acmd.writeHeader(_QUERY_USERS, 1) + acmd.writeFieldStr(_USER, user) + list, err := acmd.readUsers(cluster, policy) + if err != nil { + return nil, err + } + + if len(list) > 0 { + return list[0], nil + } + + return nil, nil +} + +func (acmd *adminCommand) queryUsers(cluster *Cluster, policy *AdminPolicy) ([]*UserRoles, error) { + // TODO: Remove the workaround in the future + time.Sleep(time.Millisecond * 10) + // defer bufPool.Put(acmd.dataBuffer) + + acmd.writeHeader(_QUERY_USERS, 0) + list, err := acmd.readUsers(cluster, policy) + if err != nil { + return nil, err + } + return list, nil +} + +func (acmd *adminCommand) queryRole(cluster *Cluster, policy *AdminPolicy, roleName string) (*Role, error) { + // TODO: Remove the workaround in the future + time.Sleep(time.Millisecond * 10) + // defer bufPool.Put(acmd.dataBuffer) + + acmd.writeHeader(_QUERY_ROLES, 1) + acmd.writeFieldStr(_ROLE, roleName) + list, err := acmd.readRoles(cluster, policy) + if err != nil { + return nil, err + } + + if len(list) > 0 { + return list[0], nil 
+ } + + return nil, nil +} + +func (acmd *adminCommand) queryRoles(cluster *Cluster, policy *AdminPolicy) ([]*Role, error) { + // TODO: Remove the workaround in the future + time.Sleep(time.Millisecond * 10) + // defer bufPool.Put(acmd.dataBuffer) + + acmd.writeHeader(_QUERY_ROLES, 0) + list, err := acmd.readRoles(cluster, policy) + if err != nil { + return nil, err + } + return list, nil +} + +func (acmd *adminCommand) writeRoles(roles []string) { + offset := acmd.dataOffset + int(_FIELD_HEADER_SIZE) + acmd.dataBuffer[offset] = byte(len(roles)) + offset++ + + for _, role := range roles { + len := copy(acmd.dataBuffer[offset+1:], role) + acmd.dataBuffer[offset] = byte(len) + offset += len + 1 + } + + size := offset - acmd.dataOffset - int(_FIELD_HEADER_SIZE) + acmd.writeFieldHeader(_ROLES, size) + acmd.dataOffset = offset +} + +func (acmd *adminCommand) writePrivileges(privileges []Privilege) error { + offset := acmd.dataOffset + int(_FIELD_HEADER_SIZE) + acmd.dataBuffer[offset] = byte(len(privileges)) + offset++ + + for _, privilege := range privileges { + code := privilege.code() + + acmd.dataBuffer[offset] = byte(code) + offset++ + + if privilege.canScope() { + + if len(privilege.SetName) > 0 && len(privilege.Namespace) == 0 { + return NewAerospikeError(INVALID_PRIVILEGE, fmt.Sprintf("Admin privilege '%v' has a set scope with an empty namespace.", privilege)) + } + + acmd.dataBuffer[offset] = byte(len(privilege.Namespace)) + offset++ + copy(acmd.dataBuffer[offset:], privilege.Namespace) + offset += len(privilege.Namespace) + + acmd.dataBuffer[offset] = byte(len(privilege.SetName)) + offset++ + copy(acmd.dataBuffer[offset:], privilege.SetName) + offset += len(privilege.SetName) + } else { + if len(privilege.Namespace) > 0 || len(privilege.SetName) > 0 { + return NewAerospikeError(INVALID_PRIVILEGE, fmt.Sprintf("Admin global rivilege '%v' can't have a namespace or set.", privilege)) + } + } + } + + size := offset - acmd.dataOffset - int(_FIELD_HEADER_SIZE) + 
acmd.writeFieldHeader(_PRIVILEGES, size) + acmd.dataOffset = offset + + return nil +} +func (acmd *adminCommand) writeSize() { + // Write total size of message which is the current offset. + var size = int64(acmd.dataOffset-8) | (_MSG_VERSION << 56) | (_MSG_TYPE << 48) + binary.BigEndian.PutUint64(acmd.dataBuffer[0:], uint64(size)) +} + +func (acmd *adminCommand) writeHeader(command byte, fieldCount int) { + // Authenticate header is almost all zeros + for i := acmd.dataOffset; i < acmd.dataOffset+16; i++ { + acmd.dataBuffer[i] = 0 + } + acmd.dataBuffer[acmd.dataOffset+2] = command + acmd.dataBuffer[acmd.dataOffset+3] = byte(fieldCount) + acmd.dataOffset += 16 +} + +func (acmd *adminCommand) writeFieldStr(id byte, str string) { + len := copy(acmd.dataBuffer[acmd.dataOffset+int(_FIELD_HEADER_SIZE):], str) + acmd.writeFieldHeader(id, len) + acmd.dataOffset += len +} + +func (acmd *adminCommand) writeFieldBytes(id byte, bytes []byte) { + copy(acmd.dataBuffer[acmd.dataOffset+int(_FIELD_HEADER_SIZE):], bytes) + acmd.writeFieldHeader(id, len(bytes)) + acmd.dataOffset += len(bytes) +} + +func (acmd *adminCommand) writeFieldHeader(id byte, size int) { + // Buffer.Int32ToBytes(int32(size+1), acmd.dataBuffer, acmd.dataOffset) + binary.BigEndian.PutUint32(acmd.dataBuffer[acmd.dataOffset:], uint32(size+1)) + + acmd.dataOffset += 4 + acmd.dataBuffer[acmd.dataOffset] = id + acmd.dataOffset++ +} + +func (acmd *adminCommand) executeCommand(cluster *Cluster, policy *AdminPolicy) error { + // TODO: Remove the workaround in the future + defer time.Sleep(time.Millisecond * 10) + + // defer bufPool.Put(acmd.dataBuffer) + + acmd.writeSize() + node, err := cluster.GetRandomNode() + if err != nil { + return nil + } + timeout := 1 * time.Second + if policy != nil && policy.Timeout > 0 { + timeout = policy.Timeout + } + + node.tendConnLock.Lock() + defer node.tendConnLock.Unlock() + + if err := node.initTendConn(timeout); err != nil { + return err + } + + conn := node.tendConn + if _, err 
:= conn.Write(acmd.dataBuffer[:acmd.dataOffset]); err != nil { + return err + } + + if _, err := conn.Read(acmd.dataBuffer, _HEADER_SIZE); err != nil { + return err + } + + result := acmd.dataBuffer[_RESULT_CODE] + if result != 0 { + return NewAerospikeError(ResultCode(result)) + } + + return nil +} + +func (acmd *adminCommand) readUsers(cluster *Cluster, policy *AdminPolicy) ([]*UserRoles, error) { + acmd.writeSize() + node, err := cluster.GetRandomNode() + if err != nil { + return nil, err + } + timeout := 1 * time.Second + if policy != nil && policy.Timeout > 0 { + timeout = policy.Timeout + } + + node.tendConnLock.Lock() + defer node.tendConnLock.Unlock() + + if err := node.initTendConn(timeout); err != nil { + return nil, err + } + + conn := node.tendConn + if _, err := conn.Write(acmd.dataBuffer[:acmd.dataOffset]); err != nil { + return nil, err + } + + status, list, err := acmd.readUserBlocks(conn) + if err != nil { + return nil, err + } + + if status > 0 { + return nil, NewAerospikeError(ResultCode(status)) + } + return list, nil +} + +func (acmd *adminCommand) readUserBlocks(conn *Connection) (status int, rlist []*UserRoles, err error) { + + var list []*UserRoles + + for status == 0 { + if _, err = conn.Read(acmd.dataBuffer, 8); err != nil { + return -1, nil, err + } + + size := Buffer.BytesToInt64(acmd.dataBuffer, 0) + receiveSize := (size & 0xFFFFFFFFFFFF) + + if receiveSize > 0 { + if receiveSize > int64(len(acmd.dataBuffer)) { + acmd.dataBuffer = make([]byte, receiveSize) + } + if _, err = conn.Read(acmd.dataBuffer, int(receiveSize)); err != nil { + return -1, nil, err + } + status, list, err = acmd.parseUsers(int(receiveSize)) + if err != nil { + return -1, nil, err + } + rlist = append(rlist, list...) 
+ } else { + break + } + } + return status, rlist, nil +} + +func (acmd *adminCommand) parseUsers(receiveSize int) (int, []*UserRoles, error) { + acmd.dataOffset = 0 + list := make([]*UserRoles, 0, 100) + + for acmd.dataOffset < receiveSize { + resultCode := int(acmd.dataBuffer[acmd.dataOffset+1]) + + if resultCode != 0 { + if resultCode == _QUERY_END { + return -1, nil, nil + } + return resultCode, nil, nil + } + + userRoles := &UserRoles{} + fieldCount := int(acmd.dataBuffer[acmd.dataOffset+3]) + acmd.dataOffset += _HEADER_REMAINING + + for i := 0; i < fieldCount; i++ { + len := int(Buffer.BytesToInt32(acmd.dataBuffer, acmd.dataOffset)) + acmd.dataOffset += 4 + id := acmd.dataBuffer[acmd.dataOffset] + acmd.dataOffset++ + len-- + + if id == _USER { + userRoles.User = string(acmd.dataBuffer[acmd.dataOffset : acmd.dataOffset+len]) + acmd.dataOffset += len + } else if id == _ROLES { + acmd.parseRoles(userRoles) + } else { + acmd.dataOffset += len + } + } + + if userRoles.User == "" && userRoles.Roles == nil { + continue + } + + if userRoles.Roles == nil { + userRoles.Roles = make([]string, 0) + } + list = append(list, userRoles) + } + + return 0, list, nil +} + +func (acmd *adminCommand) parseRoles(userRoles *UserRoles) { + size := int(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + userRoles.Roles = make([]string, 0, size) + + for i := 0; i < size; i++ { + len := int(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + role := string(acmd.dataBuffer[acmd.dataOffset : acmd.dataOffset+len]) + acmd.dataOffset += len + userRoles.Roles = append(userRoles.Roles, role) + } +} + +func hashPassword(password string) ([]byte, error) { + // Hashing the password with the cost of 10, with a static salt + const salt = "$2a$10$7EqJtq98hPqEX7fNZaFWoO" + hashedPassword, err := bcrypt.Hash(password, salt) + if err != nil { + return nil, err + } + return []byte(hashedPassword), nil +} + +func (acmd *adminCommand) readRoles(cluster *Cluster, policy *AdminPolicy) ([]*Role, 
error) { + acmd.writeSize() + node, err := cluster.GetRandomNode() + if err != nil { + return nil, err + } + timeout := 1 * time.Second + if policy != nil && policy.Timeout > 0 { + timeout = policy.Timeout + } + + node.tendConnLock.Lock() + defer node.tendConnLock.Unlock() + + if err := node.initTendConn(timeout); err != nil { + return nil, err + } + + conn := node.tendConn + if _, err := conn.Write(acmd.dataBuffer[:acmd.dataOffset]); err != nil { + return nil, err + } + + status, list, err := acmd.readRoleBlocks(conn) + if err != nil { + return nil, err + } + + if status > 0 { + return nil, NewAerospikeError(ResultCode(status)) + } + return list, nil +} + +func (acmd *adminCommand) readRoleBlocks(conn *Connection) (status int, rlist []*Role, err error) { + + var list []*Role + + for status == 0 { + if _, err = conn.Read(acmd.dataBuffer, 8); err != nil { + return -1, nil, err + } + + size := Buffer.BytesToInt64(acmd.dataBuffer, 0) + receiveSize := int(size & 0xFFFFFFFFFFFF) + + if receiveSize > 0 { + if receiveSize > len(acmd.dataBuffer) { + acmd.dataBuffer = make([]byte, receiveSize) + } + if _, err = conn.Read(acmd.dataBuffer, int(receiveSize)); err != nil { + return -1, nil, err + } + status, list, err = acmd.parseRolesFull(receiveSize) + if err != nil { + return -1, nil, err + } + rlist = append(rlist, list...) 
+ } else { + break + } + } + return status, rlist, nil +} + +func (acmd *adminCommand) parseRolesFull(receiveSize int) (int, []*Role, error) { + acmd.dataOffset = 0 + + var list []*Role + for acmd.dataOffset < receiveSize { + resultCode := int(acmd.dataBuffer[acmd.dataOffset+1]) + + if resultCode != 0 { + if resultCode == _QUERY_END { + return -1, nil, nil + } + return resultCode, nil, nil + } + + role := &Role{} + fieldCount := int(acmd.dataBuffer[acmd.dataOffset+3]) + acmd.dataOffset += _HEADER_REMAINING + + for i := 0; i < fieldCount; i++ { + len := int(Buffer.BytesToInt32(acmd.dataBuffer, acmd.dataOffset)) + acmd.dataOffset += 4 + id := acmd.dataBuffer[acmd.dataOffset] + acmd.dataOffset++ + len-- + + if id == _ROLE { + role.Name = string(acmd.dataBuffer[acmd.dataOffset : acmd.dataOffset+len]) + acmd.dataOffset += len + } else if id == _PRIVILEGES { + acmd.parsePrivileges(role) + } else { + acmd.dataOffset += len + } + } + + if len(role.Name) == 0 && len(role.Privileges) == 0 { + continue + } + + if role.Privileges == nil { + role.Privileges = []Privilege{} + } + list = append(list, role) + } + return 0, list, nil +} + +func (acmd *adminCommand) parsePrivileges(role *Role) { + size := int(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + role.Privileges = make([]Privilege, 0, size) + + for i := 0; i < size; i++ { + priv := Privilege{} + priv.Code = privilegeFrom(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + + if priv.canScope() { + len := int(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + priv.Namespace = string(acmd.dataBuffer[acmd.dataOffset : acmd.dataOffset+len]) + acmd.dataOffset += len + + len = int(acmd.dataBuffer[acmd.dataOffset]) + acmd.dataOffset++ + priv.SetName = string(acmd.dataBuffer[acmd.dataOffset : acmd.dataOffset+len]) + acmd.dataOffset += len + } + role.Privileges = append(role.Privileges, priv) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/admin_policy.go 
b/vendor/github.com/aerospike/aerospike-client-go/admin_policy.go new file mode 100644 index 00000000000..6b0f5e13405 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/admin_policy.go @@ -0,0 +1,32 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "time" + +// AdminPolicy contains attributes used for user administration commands. +type AdminPolicy struct { + + // User administration command socket timeout in milliseconds. + // Default is one second timeout. + Timeout time.Duration +} + +// NewAdminPolicy generates a new AdminPolicy with default values. +func NewAdminPolicy() *AdminPolicy { + return &AdminPolicy{ + Timeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/aerospike.go b/vendor/github.com/aerospike/aerospike-client-go/aerospike.go new file mode 100644 index 00000000000..e66d4277a33 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/aerospike.go @@ -0,0 +1 @@ +package aerospike diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_command.go b/vendor/github.com/aerospike/aerospike-client-go/batch_command.go new file mode 100644 index 00000000000..202870f0854 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/batch_command.go @@ -0,0 +1,295 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + "reflect" + "time" + + . "github.com/aerospike/aerospike-client-go/types" + xrand "github.com/aerospike/aerospike-client-go/types/rand" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +const ( + _MAX_BUFFER_SIZE = 1024 * 1024 * 10 // 10 MB + _CHUNK_SIZE = 4096 +) + +type multiCommand interface { + Stop() +} + +type baseMultiCommand struct { + baseCommand + + terminationError ResultCode + + recordset *Recordset + + terminationErrorType ResultCode + + errChan chan error + + resObjType reflect.Type + resObjMappings map[string]string + selectCases []reflect.SelectCase +} + +var multiObjectParser func( + cmd *baseMultiCommand, + obj reflect.Value, + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) error + +var prepareReflectionData func(cmd *baseMultiCommand) + +func newMultiCommand(node *Node, recordset *Recordset) *baseMultiCommand { + cmd := &baseMultiCommand{ + baseCommand: baseCommand{ + node: node, + oneShot: true, + }, + recordset: recordset, + } + + if prepareReflectionData != nil { + prepareReflectionData(cmd) + } + return cmd +} + +func (cmd *baseMultiCommand) getNode(ifc command) (*Node, error) { + return cmd.node, nil +} + +func (cmd *baseMultiCommand) getConnection(timeout time.Duration) (*Connection, error) { + return cmd.node.getConnectionWithHint(timeout, byte(xrand.Int64()%256)) +} + +func (cmd 
*baseMultiCommand) putConnection(conn *Connection) { + cmd.node.putConnectionWithHint(conn, byte(xrand.Int64()%256)) +} + +func (cmd *baseMultiCommand) drainConn(receiveSize int) error { + // consume the rest of the input buffer from the socket + if cmd.dataOffset < receiveSize && cmd.conn.IsConnected() { + if err := cmd.readBytes(receiveSize - cmd.dataOffset); err != nil { + return err + } + } + return nil +} + +func (cmd *baseMultiCommand) parseResult(ifc command, conn *Connection) error { + // Read socket into receive buffer one record at a time. Do not read entire receive size + // because the receive buffer would be too big. + status := true + + var err error + + for status { + // Read header. + if _, err = cmd.conn.Read(cmd.dataBuffer, 8); err != nil { + return err + } + + size := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(size); err != nil { + return err + } + + receiveSize := int(size & 0xFFFFFFFFFFFF) + if receiveSize > 0 { + status, err = ifc.parseRecordResults(ifc, receiveSize) + cmd.drainConn(receiveSize) + if err != nil { + return err + } + } else { + status = false + } + } + + return nil +} + +func (cmd *baseMultiCommand) parseKey(fieldCount int) (*Key, error) { + var digest [20]byte + var namespace, setName string + var userKey Value + var err error + + for i := 0; i < fieldCount; i++ { + if err = cmd.readBytes(4); err != nil { + return nil, err + } + + fieldlen := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + if err = cmd.readBytes(fieldlen); err != nil { + return nil, err + } + + fieldtype := FieldType(cmd.dataBuffer[0]) + size := fieldlen - 1 + + switch fieldtype { + case DIGEST_RIPE: + copy(digest[:], cmd.dataBuffer[1:size+1]) + case NAMESPACE: + namespace = string(cmd.dataBuffer[1 : size+1]) + case TABLE: + setName = string(cmd.dataBuffer[1 : size+1]) + case KEY: + if userKey, err = bytesToKeyValue(int(cmd.dataBuffer[1]), cmd.dataBuffer, 2, 
size-1); err != nil { + return nil, err + } + } + } + + return &Key{namespace: namespace, setName: setName, digest: digest, userKey: userKey}, nil +} + +func (cmd *baseMultiCommand) readBytes(length int) error { + // Corrupted data streams can result in a huge length. + // Do a sanity check here. + if length > MaxBufferSize || length < 0 { + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid readBytes length: %d", length)) + } + + if length > cap(cmd.dataBuffer) { + cmd.dataBuffer = make([]byte, length) + } + + if n, err := cmd.conn.Read(cmd.dataBuffer[:length], length); err != nil { + return fmt.Errorf("Requested to read %d bytes, but %d was read. (%v)", length, n, err) + } + + cmd.dataOffset += length + return nil +} + +func (cmd *baseMultiCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) { + // Read/parse remaining message bytes one record at a time. + cmd.dataOffset = 0 + + for cmd.dataOffset < receiveSize { + if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF) + + if resultCode != 0 { + if resultCode == KEY_NOT_FOUND_ERROR { + return false, nil + } + err := NewAerospikeError(resultCode) + err = newNodeError(cmd.node, err) + return false, err + } + + info3 := int(cmd.dataBuffer[3]) + + // If cmd is the end marker of the response, do not proceed further + if (info3 & _INFO3_LAST) == _INFO3_LAST { + return false, nil + } + + generation := Buffer.BytesToUint32(cmd.dataBuffer, 6) + expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10)) + fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18)) + opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20)) + + key, err := cmd.parseKey(fieldCount) + if err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + + // if there is a recordset, process the record traditionally + // otherwise, it is supposed to be a record channel + if 
cmd.selectCases == nil { + // Parse bins. + var bins BinMap + + for i := 0; i < opCount; i++ { + if err := cmd.readBytes(8); err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + particleType := int(cmd.dataBuffer[5]) + nameSize := int(cmd.dataBuffer[7]) + + if err := cmd.readBytes(nameSize); err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + name := string(cmd.dataBuffer[:nameSize]) + + particleBytesSize := int((opSize - (4 + nameSize))) + if err = cmd.readBytes(particleBytesSize); err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize) + if err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + + if bins == nil { + bins = make(BinMap, opCount) + } + bins[name] = value + } + + // If the channel is full and it blocks, we don't want this command to + // block forever, or panic in case the channel is closed in the meantime. 
+ select { + // send back the result on the async channel + case cmd.recordset.Records <- newRecord(cmd.node, key, bins, generation, expiration): + case <-cmd.recordset.cancelled: + return false, NewAerospikeError(cmd.terminationErrorType) + } + } else if multiObjectParser != nil { + obj := reflect.New(cmd.resObjType) + if err := multiObjectParser(cmd, obj, opCount, fieldCount, generation, expiration); err != nil { + err = newNodeError(cmd.node, err) + return false, err + } + + // set the object to send + cmd.selectCases[0].Send = obj + + chosen, _, _ := reflect.Select(cmd.selectCases) + switch chosen { + case 0: // object sent + case 1: // cancel channel is closed + return false, NewAerospikeError(cmd.terminationErrorType) + } + } + } + + return true, nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_command_exists.go b/vendor/github.com/aerospike/aerospike-client-go/batch_command_exists.go new file mode 100644 index 00000000000..77f9db36823 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/batch_command_exists.go @@ -0,0 +1,115 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type batchCommandExists struct { + baseMultiCommand + + batchNamespace *batchNamespace + policy *BasePolicy + keys []*Key + existsArray []bool + index int +} + +func newBatchCommandExists( + node *Node, + batchNamespace *batchNamespace, + policy *BasePolicy, + keys []*Key, + existsArray []bool, +) *batchCommandExists { + res := &batchCommandExists{ + baseMultiCommand: *newMultiCommand(node, nil), + batchNamespace: batchNamespace, + policy: policy, + keys: keys, + existsArray: existsArray, + } + res.oneShot = false + return res +} + +func (cmd *batchCommandExists) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *batchCommandExists) writeBuffer(ifc command) error { + return cmd.setBatchExists(cmd.policy, cmd.keys, cmd.batchNamespace) +} + +// Parse all results in the batch. Add records to shared list. +// If the record was not found, the bins will be nil. +func (cmd *batchCommandExists) parseRecordResults(ifc command, receiveSize int) (bool, error) { + //Parse each message response and add it to the result array + cmd.dataOffset = 0 + + for cmd.dataOffset < receiveSize { + if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil { + return false, err + } + + resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF) + + // The only valid server return codes are "ok" and "not found". + // If other return codes are received, then abort the batch. 
+ if resultCode != 0 && resultCode != KEY_NOT_FOUND_ERROR { + return false, NewAerospikeError(resultCode) + } + + info3 := cmd.dataBuffer[3] + + // If cmd is the end marker of the response, do not proceed further + if (int(info3) & _INFO3_LAST) == _INFO3_LAST { + return false, nil + } + + fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18)) + opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20)) + + if opCount > 0 { + return false, NewAerospikeError(PARSE_ERROR, "Received bins that were not requested!") + } + + key, err := cmd.parseKey(fieldCount) + if err != nil { + return false, err + } + + offset := cmd.batchNamespace.offsets[cmd.index] + cmd.index++ + + if bytes.Equal(key.digest[:], cmd.keys[offset].digest[:]) { + // only set the results to true; as a result, no synchronization is needed + if resultCode == 0 { + cmd.existsArray[offset] = true + } + } else { + return false, NewAerospikeError(PARSE_ERROR, "Unexpected batch key returned: "+string(key.namespace)+","+Buffer.BytesToHexString(key.digest[:])) + } + } + return true, nil +} + +func (cmd *batchCommandExists) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_command_get.go b/vendor/github.com/aerospike/aerospike-client-go/batch_command_get.go new file mode 100644 index 00000000000..abee2ed16e6 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/batch_command_get.go @@ -0,0 +1,175 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "reflect" + + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type batchCommandGet struct { + baseMultiCommand + + batchNamespace *batchNamespace + policy *BasePolicy + keys []*Key + binNames map[string]struct{} + records []*Record + readAttr int + index int + + // pointer to the object that's going to be unmarshalled + objects []*reflect.Value + objectsFound []bool +} + +// this method uses reflection. +// Will not be set if performance flag is passed for the build. +var batchObjectParser func( + cmd *batchCommandGet, + offset int, + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) error + +func newBatchCommandGet( + node *Node, + batchNamespace *batchNamespace, + policy *BasePolicy, + keys []*Key, + binNames map[string]struct{}, + records []*Record, + readAttr int, +) *batchCommandGet { + res := &batchCommandGet{ + baseMultiCommand: *newMultiCommand(node, nil), + batchNamespace: batchNamespace, + policy: policy, + keys: keys, + binNames: binNames, + records: records, + readAttr: readAttr, + } + res.oneShot = false + return res +} + +func (cmd *batchCommandGet) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *batchCommandGet) writeBuffer(ifc command) error { + return cmd.setBatchGet(cmd.policy, cmd.keys, cmd.batchNamespace, cmd.binNames, cmd.readAttr) +} + +// Parse all results in the batch. Add records to shared list. +// If the record was not found, the bins will be nil. 
+func (cmd *batchCommandGet) parseRecordResults(ifc command, receiveSize int) (bool, error) { + //Parse each message response and add it to the result array + cmd.dataOffset = 0 + + for cmd.dataOffset < receiveSize { + if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil { + return false, err + } + resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF) + + // The only valid server return codes are "ok" and "not found". + // If other return codes are received, then abort the batch. + if resultCode != 0 && resultCode != KEY_NOT_FOUND_ERROR { + return false, NewAerospikeError(resultCode) + } + + info3 := int(cmd.dataBuffer[3]) + + // If cmd is the end marker of the response, do not proceed further + if (info3 & _INFO3_LAST) == _INFO3_LAST { + return false, nil + } + + generation := Buffer.BytesToUint32(cmd.dataBuffer, 6) + expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10)) + fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18)) + opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20)) + key, err := cmd.parseKey(fieldCount) + if err != nil { + return false, err + } + + offset := cmd.batchNamespace.offsets[cmd.index] //cmd.keyMap[string(key.digest)] + cmd.index++ + + if bytes.Equal(key.digest[:], cmd.keys[offset].digest[:]) { + if resultCode == 0 { + if cmd.objects == nil { + if cmd.records[offset], err = cmd.parseRecord(key, opCount, generation, expiration); err != nil { + return false, err + } + } else if batchObjectParser != nil { + // mark it as found + cmd.objectsFound[offset] = true + if err := batchObjectParser(cmd, offset, opCount, fieldCount, generation, expiration); err != nil { + return false, err + } + } + } + } else { + return false, NewAerospikeError(PARSE_ERROR, "Unexpected batch key returned: "+string(key.namespace)+","+Buffer.BytesToHexString(key.digest[:])) + } + } + return true, nil +} + +// Parses the given byte buffer and populate the result object. +// Returns the number of bytes that were parsed from the given buffer. 
+func (cmd *batchCommandGet) parseRecord(key *Key, opCount int, generation, expiration uint32) (*Record, error) { + bins := make(map[string]interface{}, opCount) + + for i := 0; i < opCount; i++ { + if err := cmd.readBytes(8); err != nil { + return nil, err + } + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + particleType := int(cmd.dataBuffer[5]) + nameSize := int(cmd.dataBuffer[7]) + + if err := cmd.readBytes(nameSize); err != nil { + return nil, err + } + name := string(cmd.dataBuffer[:nameSize]) + + particleBytesSize := int(opSize - (4 + nameSize)) + if err := cmd.readBytes(particleBytesSize); err != nil { + return nil, err + } + value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize) + if err != nil { + return nil, err + } + + bins[name] = value + } + + return newRecord(cmd.node, key, bins, generation, expiration), nil +} + +func (cmd *batchCommandGet) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_command_get_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/batch_command_get_reflect.go new file mode 100644 index 00000000000..c1c5ea16333 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/batch_command_get_reflect.go @@ -0,0 +1,92 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "errors" + "reflect" + + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// if this file is included in the build, it will include this method +func init() { + batchObjectParser = parseBatchObject +} + +func parseBatchObject( + cmd *batchCommandGet, + offset int, + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) error { + if opCount > 0 { + rv := *cmd.objects[offset] + + if rv.Kind() != reflect.Ptr { + return errors.New("Invalid type for result object. It should be of type Struct Pointer.") + } + rv = rv.Elem() + + if !rv.CanAddr() { + return errors.New("Invalid type for object. It should be addressable (a pointer)") + } + + if rv.Kind() != reflect.Struct { + return errors.New("Invalid type for object. It should be a pointer to a struct.") + } + + // find the name based on tag mapping + iobj := indirect(rv) + mappings := objectMappings.getMapping(iobj.Type()) + + if err := setObjectMetaFields(iobj, TTL(expiration), generation); err != nil { + return err + } + + for i := 0; i < opCount; i++ { + if err := cmd.readBytes(8); err != nil { + return err + } + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + particleType := int(cmd.dataBuffer[5]) + nameSize := int(cmd.dataBuffer[7]) + + if err := cmd.readBytes(nameSize); err != nil { + return err + } + name := string(cmd.dataBuffer[:nameSize]) + + particleBytesSize := int(opSize - (4 + nameSize)) + if err := cmd.readBytes(particleBytesSize); err != nil { + return err + } + value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize) + if err != nil { + return err + } + if err := setObjectField(mappings, iobj, name, value); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_command_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/batch_command_reflect.go new file mode 100644 index 00000000000..6e44b6bcebb --- /dev/null +++ 
b/vendor/github.com/aerospike/aerospike-client-go/batch_command_reflect.go @@ -0,0 +1,87 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "reflect" + + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// this function will only be set if the performance flag is not passed for build +func init() { + multiObjectParser = batchParseObject + prepareReflectionData = concretePrepareReflectionData +} + +func concretePrepareReflectionData(cmd *baseMultiCommand) { + // if a channel is assigned, assign its value type + if cmd.recordset != nil && !cmd.recordset.objChan.IsNil() { + // this channel must be of type chan *T + cmd.resObjType = cmd.recordset.objChan.Type().Elem().Elem() + cmd.resObjMappings = objectMappings.getMapping(cmd.recordset.objChan.Type().Elem().Elem()) + + cmd.selectCases = []reflect.SelectCase{ + {Dir: reflect.SelectSend, Chan: cmd.recordset.objChan}, + {Dir: reflect.SelectRecv, Chan: reflect.ValueOf(cmd.recordset.cancelled)}, + } + } +} + +func batchParseObject( + cmd *baseMultiCommand, + obj reflect.Value, + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) error { + for i := 0; i < opCount; i++ { + if err := cmd.readBytes(8); err != nil { + err = newNodeError(cmd.node, err) + return err + } + + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + particleType := int(cmd.dataBuffer[5]) + 
nameSize := int(cmd.dataBuffer[7]) + + if err := cmd.readBytes(nameSize); err != nil { + err = newNodeError(cmd.node, err) + return err + } + name := string(cmd.dataBuffer[:nameSize]) + + particleBytesSize := int((opSize - (4 + nameSize))) + if err := cmd.readBytes(particleBytesSize); err != nil { + err = newNodeError(cmd.node, err) + return err + } + value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize) + if err != nil { + err = newNodeError(cmd.node, err) + return err + } + + iobj := indirect(obj) + if err := setObjectField(cmd.resObjMappings, iobj, name, value); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/batch_node.go b/vendor/github.com/aerospike/aerospike-client-go/batch_node.go new file mode 100644 index 00000000000..c372c31c2cf --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/batch_node.go @@ -0,0 +1,117 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . 
"github.com/aerospike/aerospike-client-go/types" +) + +type batchNode struct { + Node *Node + BatchNamespaces []*batchNamespace + KeyCapacity int +} + +func newBatchNodeList(cluster *Cluster, policy *BasePolicy, keys []*Key) ([]*batchNode, error) { + nodes := cluster.GetNodes() + + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "command failed because cluster is empty.") + } + + nodeCount := len(nodes) + keysPerNode := len(keys)/nodeCount + 10 + + // Split keys by server node. + batchNodes := make([]*batchNode, 0, nodeCount) + + for i, key := range keys { + partition := NewPartitionByKey(key) + + // error not required + node, err := cluster.getReadNode(partition, policy.ReplicaPolicy) + if err != nil { + return nil, err + } + batchNode := findBatchNode(batchNodes, node) + + if batchNode == nil { + batchNodes = append(batchNodes, newBatchNode(node, keysPerNode, key.Namespace(), i)) + } else { + batchNode.AddKey(key.Namespace(), i) + } + } + return batchNodes, nil +} + +func newBatchNode(node *Node, keyCapacity int, namespace string, offset int) *batchNode { + return &batchNode{ + Node: node, + KeyCapacity: keyCapacity, + BatchNamespaces: []*batchNamespace{newBatchNamespace(&namespace, keyCapacity, offset)}, + } +} + +func (bn *batchNode) AddKey(namespace string, offset int) { + batchNamespace := bn.findNamespace(&namespace) + + if batchNamespace == nil { + bn.BatchNamespaces = append(bn.BatchNamespaces, newBatchNamespace(&namespace, bn.KeyCapacity, offset)) + } else { + batchNamespace.add(offset) + } +} + +func (bn *batchNode) findNamespace(ns *string) *batchNamespace { + for _, batchNamespace := range bn.BatchNamespaces { + // Note: use both pointer equality and equals. + if batchNamespace.namespace == ns || *batchNamespace.namespace == *ns { + return batchNamespace + } + } + return nil +} + +func findBatchNode(nodes []*batchNode, node *Node) *batchNode { + for i := range nodes { + // Note: using pointer equality for performance. 
+ if nodes[i].Node == node { + return nodes[i] + } + } + return nil +} + +type batchNamespace struct { + namespace *string + offsets []int + offsetSize int +} + +func newBatchNamespace(namespace *string, capacity, offset int) *batchNamespace { + res := &batchNamespace{ + namespace: namespace, + offsets: make([]int, 0, capacity), + offsetSize: 1, + } + res.offsets = append(res.offsets, offset) + + return res +} + +func (bn *batchNamespace) add(offset int) { + bn.offsets = append(bn.offsets, offset) + bn.offsetSize++ +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/bin.go b/vendor/github.com/aerospike/aerospike-client-go/bin.go new file mode 100644 index 00000000000..fc195f5ddb8 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/bin.go @@ -0,0 +1,41 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// BinMap is used to define a map of bin names to values. +type BinMap map[string]interface{} + +// Bin encapsulates a field name/value pair. +type Bin struct { + // Bin name. Current limit is 14 characters. + Name string + + // Bin value. + Value Value +} + +// NewBin generates a new Bin instance, specifying bin name and string value. +// For servers configured as "single-bin", enter an empty name. +func NewBin(name string, value interface{}) *Bin { + return &Bin{ + Name: name, + Value: NewValue(value), + } +} + +// String implements Stringer interface. 
+func (bn *Bin) String() string { + return bn.Name + ":" + bn.Value.String() +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/bytes_buffer.go b/vendor/github.com/aerospike/aerospike-client-go/bytes_buffer.go new file mode 100644 index 00000000000..cbdd3100aee --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/bytes_buffer.go @@ -0,0 +1,30 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// BufferEx is a specialized buffer interface for aerospike client. +type BufferEx interface { + WriteInt64(num int64) (int, error) + WriteUint64(num uint64) (int, error) + WriteInt32(num int32) (int, error) + WriteUint32(num uint32) (int, error) + WriteInt16(num int16) (int, error) + WriteUint16(num uint16) (int, error) + WriteFloat32(float float32) (int, error) + WriteFloat64(float float64) (int, error) + WriteByte(b byte) error + WriteString(s string) (int, error) + Write(b []byte) (int, error) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/cdt_list.go b/vendor/github.com/aerospike/aerospike-client-go/cdt_list.go new file mode 100644 index 00000000000..9a14eda7335 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/cdt_list.go @@ -0,0 +1,289 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . "github.com/aerospike/aerospike-client-go/types" +) + +// List bin operations. Create list operations used by client.Operate command. +// List operations support negative indexing. If the index is negative, the +// resolved index starts backwards from end of list. +// +// Index/Range examples: +// +// Index 0: First item in list. +// Index 4: Fifth item in list. +// Index -1: Last item in list. +// Index -3: Third to last item in list. +// Index 1 Count 2: Second and third items in list. +// Index -3 Count 3: Last three items in list. +// Index -5 Count 4: Range between fifth to last item to second to last item inclusive. +// +// If an index is out of bounds, a parameter error will be returned. If a range is partially +// out of bounds, the valid part of the range will be returned. 
+ +const ( + _CDT_LIST_APPEND = 1 + _CDT_LIST_APPEND_ITEMS = 2 + _CDT_LIST_INSERT = 3 + _CDT_LIST_INSERT_ITEMS = 4 + _CDT_LIST_POP = 5 + _CDT_LIST_POP_RANGE = 6 + _CDT_LIST_REMOVE = 7 + _CDT_LIST_REMOVE_RANGE = 8 + _CDT_LIST_SET = 9 + _CDT_LIST_TRIM = 10 + _CDT_LIST_CLEAR = 11 + _CDT_LIST_SIZE = 16 + _CDT_LIST_GET = 17 + _CDT_LIST_GET_RANGE = 18 +) + +func packCDTParamsAsArray(packer BufferEx, opType int16, params ...Value) (int, error) { + size := 0 + n, err := __PackShortRaw(packer, opType) + if err != nil { + return n, err + } + size += n + + if len(params) > 0 { + if n, err = __PackArrayBegin(packer, len(params)); err != nil { + return size + n, err + } + size += n + + for i := range params { + if n, err = params[i].pack(packer); err != nil { + return size + n, err + } + size += n + } + } + return size, nil +} + +func packCDTIfcParamsAsArray(packer BufferEx, opType int16, params ListValue) (int, error) { + return packCDTIfcVarParamsAsArray(packer, opType, []interface{}(params)...) +} + +func packCDTIfcVarParamsAsArray(packer BufferEx, opType int16, params ...interface{}) (int, error) { + size := 0 + n, err := __PackShortRaw(packer, opType) + if err != nil { + return n, err + } + size += n + + if len(params) > 0 { + if n, err = __PackArrayBegin(packer, len(params)); err != nil { + return size + n, err + } + size += n + + for i := range params { + if n, err = __PackObject(packer, params[i], false); err != nil { + return size + n, err + } + size += n + } + } + return size, nil +} + +func listAppendOpEncoder(op *Operation, packer BufferEx) (int, error) { + params := op.binValue.(ListValue) + if len(params) == 1 { + return packCDTIfcVarParamsAsArray(packer, _CDT_LIST_APPEND, params[0]) + } else if len(params) > 1 { + return packCDTParamsAsArray(packer, _CDT_LIST_APPEND_ITEMS, params) + } + + return -1, NewAerospikeError(PARAMETER_ERROR, "At least one value must be provided for ListAppendOp") +} + +// ListAppendOp creates a list append operation. 
+// Server appends values to end of list bin.
+// Server returns list size on bin name.
+// It will panic if no values have been passed.
+func ListAppendOp(binName string, values ...interface{}) *Operation {
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ListValue(values), encoder: listAppendOpEncoder}
+}
+
+func listInsertOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	args := op.binValue.(ValueArray)
+	params := args[1].(ListValue)
+	if len(params) == 1 {
+		return packCDTIfcVarParamsAsArray(packer, _CDT_LIST_INSERT, args[0], params[0])
+	} else if len(params) > 1 {
+		return packCDTParamsAsArray(packer, _CDT_LIST_INSERT_ITEMS, args[0], params)
+	}
+
+	return -1, NewAerospikeError(PARAMETER_ERROR, "At least one value must be provided for ListInsertOp")
+}
+
+// ListInsertOp creates a list insert operation.
+// Server inserts value to specified index of list bin.
+// Server returns list size on bin name.
+// It will panic if no values have been passed.
+func ListInsertOp(binName string, index int, values ...interface{}) *Operation {
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ValueArray([]Value{IntegerValue(index), ListValue(values)}), encoder: listInsertOpEncoder}
+}
+
+func listPopOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	return packCDTParamsAsArray(packer, _CDT_LIST_POP, op.binValue)
+}
+
+// ListPopOp creates list pop operation.
+// Server returns item at specified index and removes item from list bin.
+func ListPopOp(binName string, index int) *Operation {
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: IntegerValue(index), encoder: listPopOpEncoder}
+}
+
+func listPopRangeOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	return packCDTParamsAsArray(packer, _CDT_LIST_POP_RANGE, op.binValue.(ValueArray)...)
+}
+
+// ListPopRangeOp creates a list pop range operation.
+// Server returns items starting at specified index and removes items from list bin.
+func ListPopRangeOp(binName string, index int, count int) *Operation { + if count == 1 { + return ListPopOp(binName, index) + } + + return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ValueArray([]Value{IntegerValue(index), IntegerValue(count)}), encoder: listPopRangeOpEncoder} +} + +func listPopRangeFromOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_POP_RANGE, op.binValue) +} + +// ListPopRangeFromOp creates a list pop range operation. +// Server returns items starting at specified index to the end of list and removes items from list bin. +func ListPopRangeFromOp(binName string, index int) *Operation { + return &Operation{opType: CDT_MODIFY, binName: binName, binValue: IntegerValue(index), encoder: listPopRangeFromOpEncoder} +} + +func listRemoveOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_REMOVE, op.binValue) +} + +// ListRemoveOp creates a list remove operation. +// Server removes item at specified index from list bin. +// Server returns number of items removed. +func ListRemoveOp(binName string, index int) *Operation { + return &Operation{opType: CDT_MODIFY, binName: binName, binValue: IntegerValue(index), encoder: listRemoveOpEncoder} +} + +func listRemoveRangeOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_REMOVE_RANGE, op.binValue.(ValueArray)...) +} + +// ListRemoveRangeOp creates a list remove range operation. +// Server removes "count" items starting at specified index from list bin. +// Server returns number of items removed. 
+func ListRemoveRangeOp(binName string, index int, count int) *Operation {
+	if count == 1 {
+		return ListRemoveOp(binName, index)
+	}
+
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ValueArray([]Value{IntegerValue(index), IntegerValue(count)}), encoder: listRemoveRangeOpEncoder}
+}
+
+func listRemoveRangeFromOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	return packCDTParamsAsArray(packer, _CDT_LIST_REMOVE_RANGE, op.binValue)
+}
+
+// ListRemoveRangeFromOp creates a list remove range operation.
+// Server removes all items starting at specified index to the end of list.
+// Server returns number of items removed.
+func ListRemoveRangeFromOp(binName string, index int) *Operation {
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: IntegerValue(index), encoder: listRemoveRangeFromOpEncoder}
+}
+
+func listSetOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	return packCDTIfcParamsAsArray(packer, _CDT_LIST_SET, op.binValue.(ListValue))
+}
+
+// ListSetOp creates a list set operation.
+// Server sets item value at specified index in list bin.
+// Server does not return a result by default.
+func ListSetOp(binName string, index int, value interface{}) *Operation {
+	return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ListValue([]interface{}{IntegerValue(index), value}), encoder: listSetOpEncoder}
+}
+
+func listTrimOpEncoder(op *Operation, packer BufferEx) (int, error) {
+	return packCDTParamsAsArray(packer, _CDT_LIST_TRIM, op.binValue.(ValueArray)...)
+}
+
+// ListTrimOp creates a list trim operation.
+// Server removes "count" items in list bin that do not fall into range specified
+// by index and count range. If the range is out of bounds, then all items will be removed.
+// Server returns number of elements that were removed.
+func ListTrimOp(binName string, index int, count int) *Operation { + return &Operation{opType: CDT_MODIFY, binName: binName, binValue: ValueArray([]Value{IntegerValue(index), IntegerValue(count)}), encoder: listTrimOpEncoder} +} + +func listClearOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_CLEAR) +} + +// ListClearOp creates a list clear operation. +// Server removes all items in list bin. +// Server does not return a result by default. +func ListClearOp(binName string) *Operation { + return &Operation{opType: CDT_MODIFY, binName: binName, binValue: NewNullValue(), encoder: listClearOpEncoder} +} + +func listSizeOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_SIZE) +} + +// ListSizeOp creates a list size operation. +// Server returns size of list on bin name. +func ListSizeOp(binName string) *Operation { + return &Operation{opType: CDT_READ, binName: binName, binValue: NewNullValue(), encoder: listSizeOpEncoder} +} + +func listGetOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_GET, op.binValue) +} + +// ListGetOp creates a list get operation. +// Server returns item at specified index in list bin. +func ListGetOp(binName string, index int) *Operation { + return &Operation{opType: CDT_READ, binName: binName, binValue: IntegerValue(index), encoder: listGetOpEncoder} +} + +func listGetRangeOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_GET_RANGE, op.binValue.(ValueArray)...) +} + +// ListGetRangeOp creates a list get range operation. +// Server returns "count" items starting at specified index in list bin. 
+func ListGetRangeOp(binName string, index int, count int) *Operation { + return &Operation{opType: CDT_READ, binName: binName, binValue: ValueArray([]Value{IntegerValue(index), IntegerValue(count)}), encoder: listGetRangeOpEncoder} +} + +func listGetRangeFromOpEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_LIST_GET_RANGE, op.binValue) +} + +// ListGetRangeFromOp creates a list get range operation. +// Server returns items starting at specified index to the end of list. +func ListGetRangeFromOp(binName string, index int) *Operation { + return &Operation{opType: CDT_READ, binName: binName, binValue: IntegerValue(index), encoder: listGetRangeFromOpEncoder} +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/cdt_map.go b/vendor/github.com/aerospike/aerospike-client-go/cdt_map.go new file mode 100644 index 00000000000..9407d4a6291 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/cdt_map.go @@ -0,0 +1,542 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +package aerospike + +const ( + _CDT_MAP_SET_TYPE = 64 + _CDT_MAP_ADD = 65 + _CDT_MAP_ADD_ITEMS = 66 + _CDT_MAP_PUT = 67 + _CDT_MAP_PUT_ITEMS = 68 + _CDT_MAP_REPLACE = 69 + _CDT_MAP_REPLACE_ITEMS = 70 + _CDT_MAP_INCREMENT = 73 + _CDT_MAP_DECREMENT = 74 + _CDT_MAP_CLEAR = 75 + _CDT_MAP_REMOVE_BY_KEY = 76 + _CDT_MAP_REMOVE_BY_INDEX = 77 + _CDT_MAP_REMOVE_BY_RANK = 79 + _CDT_MAP_REMOVE_KEY_LIST = 81 + _CDT_MAP_REMOVE_BY_VALUE = 82 + _CDT_MAP_REMOVE_VALUE_LIST = 83 + _CDT_MAP_REMOVE_BY_KEY_INTERVAL = 84 + _CDT_MAP_REMOVE_BY_INDEX_RANGE = 85 + _CDT_MAP_REMOVE_BY_VALUE_INTERVAL = 86 + _CDT_MAP_REMOVE_BY_RANK_RANGE = 87 + _CDT_MAP_SIZE = 96 + _CDT_MAP_GET_BY_KEY = 97 + _CDT_MAP_GET_BY_INDEX = 98 + _CDT_MAP_GET_BY_RANK = 100 + _CDT_MAP_GET_BY_VALUE = 102 + _CDT_MAP_GET_BY_KEY_INTERVAL = 103 + _CDT_MAP_GET_BY_INDEX_RANGE = 104 + _CDT_MAP_GET_BY_VALUE_INTERVAL = 105 + _CDT_MAP_GET_BY_RANK_RANGE = 106 +) + +type mapOrderType int + +// Map storage order. +var MapOrder = struct { + // Map is not ordered. This is the default. + UNORDERED mapOrderType // 0 + + // Order map by key. + KEY_ORDERED mapOrderType // 1 + + // Order map by key, then value. + KEY_VALUE_ORDERED mapOrderType // 3 +}{0, 1, 3} + +type mapReturnType int + +// Map return type. Type of data to return when selecting or removing items from the map. +var MapReturnType = struct { + // Do not return a result. + NONE mapReturnType + + // Return key index order. + // + // 0 = first key + // N = Nth key + // -1 = last key + INDEX mapReturnType + + // Return reverse key order. + // + // 0 = last key + // -1 = first key + REVERSE_INDEX mapReturnType + + // Return value order. + // + // 0 = smallest value + // N = Nth smallest value + // -1 = largest value + RANK mapReturnType + + // Return reserve value order. + // + // 0 = largest value + // N = Nth largest value + // -1 = smallest value + REVERSE_RANK mapReturnType + + // Return count of items selected. 
+ COUNT mapReturnType + + // Return key for single key read and key list for range read. + KEY mapReturnType + + // Return value for single key read and value list for range read. + VALUE mapReturnType + + // Return key/value items. The possible return types are: + // + // map[interface{}]interface{} : Returned for unordered maps + // []MapPair : Returned for range results where range order needs to be preserved. + KEY_VALUE mapReturnType +}{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, +} + +// Unique key map write type. +type mapWriteMode struct { + itemCommand int + itemsCommand int +} + +var MapWriteMode = struct { + // If the key already exists, the item will be overwritten. + // If the key does not exist, a new item will be created. + UPDATE *mapWriteMode + + // If the key already exists, the item will be overwritten. + // If the key does not exist, the write will fail. + UPDATE_ONLY *mapWriteMode + + // If the key already exists, the write will fail. + // If the key does not exist, a new item will be created. + CREATE_ONLY *mapWriteMode +}{ + &mapWriteMode{_CDT_MAP_PUT, _CDT_MAP_PUT_ITEMS}, + &mapWriteMode{_CDT_MAP_REPLACE, _CDT_MAP_REPLACE_ITEMS}, + &mapWriteMode{_CDT_MAP_ADD, _CDT_MAP_ADD_ITEMS}, +} + +// MapPolicy directives when creating a map and writing map items. +type MapPolicy struct { + attributes mapOrderType + itemCommand int + itemsCommand int +} + +// Create unique key map with specified order when map does not exist. +// Use specified write mode when writing map items. 
+func NewMapPolicy(order mapOrderType, writeMode *mapWriteMode) *MapPolicy { + return &MapPolicy{ + attributes: order, + itemCommand: writeMode.itemCommand, + itemsCommand: writeMode.itemsCommand, + } +} + +// DefaultMapPolicy returns the default map policy +func DefaultMapPolicy() *MapPolicy { + return NewMapPolicy(MapOrder.UNORDERED, MapWriteMode.UPDATE) +} + +func newMapSetPolicyEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTParamsAsArray(packer, _CDT_MAP_SET_TYPE, op.binValue.(IntegerValue)) +} + +func newMapSetPolicy(binName string, attributes mapOrderType) *Operation { + return &Operation{ + opType: MAP_MODIFY, + binName: binName, + binValue: IntegerValue(attributes), + encoder: newMapSetPolicyEncoder, + } +} + +func newMapCreatePutEncoder(op *Operation, packer BufferEx) (int, error) { + return packCDTIfcParamsAsArray(packer, int16(*op.opSubType), op.binValue.(ListValue)) +} + +func newMapCreatePut(command int, attributes mapOrderType, binName string, value1 interface{}, value2 interface{}) *Operation { + if command == _CDT_MAP_REPLACE { + // Replace doesn't allow map attributes because it does not create on non-existing key. 
+ return &Operation{ + opType: MAP_MODIFY, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{value1, value2}), + encoder: newMapCreatePutEncoder, + } + } + + return &Operation{ + opType: MAP_MODIFY, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{value1, value2, IntegerValue(attributes)}), + encoder: newMapCreatePutEncoder, + } +} + +func newMapCreateOperationEncoder(op *Operation, packer BufferEx) (int, error) { + if op.binValue != nil { + if params := op.binValue.(ListValue); len(params) > 0 { + return packCDTIfcParamsAsArray(packer, int16(*op.opSubType), op.binValue.(ListValue)) + } + } + return packCDTParamsAsArray(packer, int16(*op.opSubType)) +} + +func newMapCreateOperationValues2(command int, attributes mapOrderType, binName string, value1 interface{}, value2 interface{}) *Operation { + return &Operation{ + opType: MAP_MODIFY, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{value1, value2, IntegerValue(attributes)}), + encoder: newMapCreateOperationEncoder, + } +} + +func newMapCreateOperationValues0(command int, typ OperationType, binName string) *Operation { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + // binValue: NewNullValue(), + encoder: newMapCreateOperationEncoder, + } +} + +func newMapCreateOperationValuesN(command int, typ OperationType, binName string, values []interface{}, returnType mapReturnType) *Operation { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), ListValue(values)}), + encoder: newMapCreateOperationEncoder, + } +} + +func newMapCreateOperationValue1(command int, typ OperationType, binName string, value interface{}, returnType mapReturnType) *Operation { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), value}), + encoder: 
newMapCreateOperationEncoder, + } +} + +func newMapCreateOperationIndex(command int, typ OperationType, binName string, index int, returnType mapReturnType) *Operation { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), index}), + encoder: newMapCreateOperationEncoder, + } +} + +func newMapCreateOperationIndexCount(command int, typ OperationType, binName string, index int, count int, returnType mapReturnType) *Operation { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), index, count}), + encoder: newMapCreateOperationEncoder, + } +} + +func newMapCreateRangeOperation(command int, typ OperationType, binName string, begin interface{}, end interface{}, returnType mapReturnType) *Operation { + if end == nil { + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), begin}), + encoder: newMapCreateOperationEncoder, + } + } + + return &Operation{ + opType: typ, + opSubType: &command, + binName: binName, + binValue: ListValue([]interface{}{IntegerValue(returnType), begin, end}), + encoder: newMapCreateOperationEncoder, + } +} + +///////////////////////// + +// Unique key map bin operations. Create map operations used by the client operate command. +// The default unique key map is unordered. +// +// All maps maintain an index and a rank. The index is the item offset from the start of the map, +// for both unordered and ordered maps. The rank is the sorted index of the value component. +// Map supports negative indexing for index and rank. +// +// Index examples: +// +// Index 0: First item in map. +// Index 4: Fifth item in map. +// Index -1: Last item in map. +// Index -3: Third to last item in map. +// Index 1 Count 2: Second and third items in map. +// Index -3 Count 3: Last three items in map. 
+// Index -5 Count 4: Range between fifth to last item to second to last item inclusive. +// +// Rank examples: +// +// Rank 0: Item with lowest value rank in map. +// Rank 4: Fifth lowest ranked item in map. +// Rank -1: Item with highest ranked value in map. +// Rank -3: Item with third highest ranked value in map. +// Rank 1 Count 2: Second and third lowest ranked items in map. +// Rank -3 Count 3: Top three ranked items in map. + +// MapSetPolicyOp creates set map policy operation. +// Server sets map policy attributes. Server returns null. +// +// The required map policy attributes can be changed after the map is created. +func MapSetPolicyOp(policy *MapPolicy, binName string) *Operation { + return newMapSetPolicy(binName, policy.attributes) +} + +// MapPutOp creates map put operation. +// Server writes key/value item to map bin and returns map size. +// +// The required map policy dictates the type of map to create when it does not exist. +// The map policy also specifies the mode used when writing items to the map. +func MapPutOp(policy *MapPolicy, binName string, key interface{}, value interface{}) *Operation { + return newMapCreatePut(policy.itemCommand, policy.attributes, binName, key, value) +} + +// MapPutItemsOp creates map put items operation +// Server writes each map item to map bin and returns map size. +// +// The required map policy dictates the type of map to create when it does not exist. +// The map policy also specifies the mode used when writing items to the map. +func MapPutItemsOp(policy *MapPolicy, binName string, amap map[interface{}]interface{}) *Operation { + if policy.itemsCommand == int(_CDT_MAP_REPLACE_ITEMS) { + // Replace doesn't allow map attributes because it does not create on non-existing key. 
+ return &Operation{ + opType: MAP_MODIFY, + opSubType: &policy.itemsCommand, + binName: binName, + binValue: ListValue([]interface{}{MapValue(amap)}), + encoder: newMapCreateOperationEncoder, + } + } + + return &Operation{ + opType: MAP_MODIFY, + opSubType: &policy.itemsCommand, + binName: binName, + binValue: ListValue([]interface{}{MapValue(amap), IntegerValue(policy.attributes)}), + encoder: newMapCreateOperationEncoder, + } +} + +// MapIncrementOp creates map increment operation. +// Server increments values by incr for all items identified by key and returns final result. +// Valid only for numbers. +// +// The required map policy dictates the type of map to create when it does not exist. +// The map policy also specifies the mode used when writing items to the map. +func MapIncrementOp(policy *MapPolicy, binName string, key interface{}, incr interface{}) *Operation { + return newMapCreateOperationValues2(_CDT_MAP_INCREMENT, policy.attributes, binName, key, incr) +} + +// MapDecrementOp creates map decrement operation. +// Server decrements values by decr for all items identified by key and returns final result. +// Valid only for numbers. +// +// The required map policy dictates the type of map to create when it does not exist. +// The map policy also specifies the mode used when writing items to the map. +func MapDecrementOp(policy *MapPolicy, binName string, key interface{}, decr interface{}) *Operation { + return newMapCreateOperationValues2(_CDT_MAP_DECREMENT, policy.attributes, binName, key, decr) +} + +// MapClearOp creates map clear operation. +// Server removes all items in map. Server returns null. +func MapClearOp(binName string) *Operation { + return newMapCreateOperationValues0(_CDT_MAP_CLEAR, MAP_MODIFY, binName) +} + +// MapRemoveByKeyOp creates map remove operation. +// Server removes map item identified by key and returns removed data specified by returnType. 
+func MapRemoveByKeyOp(binName string, key interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_BY_KEY, MAP_MODIFY, binName, key, returnType) +} + +// MapRemoveByKeyListOp creates map remove operation. +// Server removes map items identified by keys and returns removed data specified by returnType. +func MapRemoveByKeyListOp(binName string, keys []interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_KEY_LIST, MAP_MODIFY, binName, keys, returnType) +} + +// MapRemoveByKeyRangeOp creates map remove operation. +// Server removes map items identified by key range (keyBegin inclusive, keyEnd exclusive). +// If keyBegin is null, the range is less than keyEnd. +// If keyEnd is null, the range is greater than equal to keyBegin. +// +// Server returns removed data specified by returnType. +func MapRemoveByKeyRangeOp(binName string, keyBegin interface{}, keyEnd interface{}, returnType mapReturnType) *Operation { + return newMapCreateRangeOperation(_CDT_MAP_REMOVE_BY_KEY_INTERVAL, MAP_MODIFY, binName, keyBegin, keyEnd, returnType) +} + +// MapRemoveByValueOp creates map remove operation. +// Server removes map items identified by value and returns removed data specified by returnType. +func MapRemoveByValueOp(binName string, value interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_BY_VALUE, MAP_MODIFY, binName, value, returnType) +} + +// MapRemoveByValueListOp creates map remove operation. +// Server removes map items identified by values and returns removed data specified by returnType. +func MapRemoveByValueListOp(binName string, values []interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValuesN(_CDT_MAP_REMOVE_VALUE_LIST, MAP_MODIFY, binName, values, returnType) +} + +// MapRemoveByValueRangeOp creates map remove operation. 
+// Server removes map items identified by value range (valueBegin inclusive, valueEnd exclusive). +// If valueBegin is null, the range is less than valueEnd. +// If valueEnd is null, the range is greater than equal to valueBegin. +// +// Server returns removed data specified by returnType. +func MapRemoveByValueRangeOp(binName string, valueBegin interface{}, valueEnd interface{}, returnType mapReturnType) *Operation { + return newMapCreateRangeOperation(_CDT_MAP_REMOVE_BY_VALUE_INTERVAL, MAP_MODIFY, binName, valueBegin, valueEnd, returnType) +} + +// MapRemoveByIndexOp creates map remove operation. +// Server removes map item identified by index and returns removed data specified by returnType. +func MapRemoveByIndexOp(binName string, index int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_BY_INDEX, MAP_MODIFY, binName, index, returnType) +} + +// MapRemoveByIndexRangeOp creates map remove operation. +// Server removes map items starting at specified index to the end of map and returns removed +// data specified by returnType. +func MapRemoveByIndexRangeOp(binName string, index int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_BY_INDEX_RANGE, MAP_MODIFY, binName, index, returnType) +} + +// MapRemoveByIndexRangeCountOp creates map remove operation. +// Server removes "count" map items starting at specified index and returns removed data specified by returnType. +func MapRemoveByIndexRangeCountOp(binName string, index int, count int, returnType mapReturnType) *Operation { + return newMapCreateOperationIndexCount(_CDT_MAP_REMOVE_BY_INDEX_RANGE, MAP_MODIFY, binName, index, count, returnType) +} + +// MapRemoveByRankOp creates map remove operation. +// Server removes map item identified by rank and returns removed data specified by returnType. 
+func MapRemoveByRankOp(binName string, rank int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_REMOVE_BY_RANK, MAP_MODIFY, binName, rank, returnType) +} + +// MapRemoveByRankRangeOp creates map remove operation. +// Server removes map items starting at specified rank to the last ranked item and returns removed +// data specified by returnType. +func MapRemoveByRankRangeOp(binName string, rank int, returnType mapReturnType) *Operation { + return newMapCreateOperationIndex(_CDT_MAP_REMOVE_BY_RANK_RANGE, MAP_MODIFY, binName, rank, returnType) +} + +// MapRemoveByRankRangeCountOp creates map remove operation. +// Server removes "count" map items starting at specified rank and returns removed data specified by returnType. +func MapRemoveByRankRangeCountOp(binName string, rank int, count int, returnType mapReturnType) *Operation { + return newMapCreateOperationIndexCount(_CDT_MAP_REMOVE_BY_RANK_RANGE, MAP_MODIFY, binName, rank, count, returnType) +} + +// MapSizeOp creates map size operation. +// Server returns size of map. +func MapSizeOp(binName string) *Operation { + return newMapCreateOperationValues0(_CDT_MAP_SIZE, MAP_READ, binName) +} + +// MapGetByKeyOp creates map get by key operation. +// Server selects map item identified by key and returns selected data specified by returnType. +func MapGetByKeyOp(binName string, key interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_KEY, MAP_READ, binName, key, returnType) +} + +// MapGetByKeyRangeOp creates map get by key range operation. +// Server selects map items identified by key range (keyBegin inclusive, keyEnd exclusive). +// If keyBegin is null, the range is less than keyEnd. +// If keyEnd is null, the range is greater than equal to keyBegin. +// +// Server returns selected data specified by returnType. 
+func MapGetByKeyRangeOp(binName string, keyBegin interface{}, keyEnd interface{}, returnType mapReturnType) *Operation { + return newMapCreateRangeOperation(_CDT_MAP_GET_BY_KEY_INTERVAL, MAP_READ, binName, keyBegin, keyEnd, returnType) +} + +// MapGetByValueOp creates map get by value operation. +// Server selects map items identified by value and returns selected data specified by returnType. +func MapGetByValueOp(binName string, value interface{}, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_VALUE, MAP_READ, binName, value, returnType) +} + +// MapGetByValueRangeOp creates map get by value range operation. +// Server selects map items identified by value range (valueBegin inclusive, valueEnd exclusive) +// If valueBegin is null, the range is less than valueEnd. +// If valueEnd is null, the range is greater than equal to valueBegin. +// +// Server returns selected data specified by returnType. +func MapGetByValueRangeOp(binName string, valueBegin interface{}, valueEnd interface{}, returnType mapReturnType) *Operation { + return newMapCreateRangeOperation(_CDT_MAP_GET_BY_VALUE_INTERVAL, MAP_READ, binName, valueBegin, valueEnd, returnType) +} + +// MapGetByIndexOp creates map get by index operation. +// Server selects map item identified by index and returns selected data specified by returnType. +func MapGetByIndexOp(binName string, index int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_INDEX, MAP_READ, binName, index, returnType) +} + +// MapGetByIndexRangeOp creates map get by index range operation. +// Server selects map items starting at specified index to the end of map and returns selected +// data specified by returnType. 
+func MapGetByIndexRangeOp(binName string, index int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_INDEX_RANGE, MAP_READ, binName, index, returnType) +} + +// MapGetByIndexRangeCountOp creates map get by index range operation. +// Server selects "count" map items starting at specified index and returns selected data specified by returnType. +func MapGetByIndexRangeCountOp(binName string, index int, count int, returnType mapReturnType) *Operation { + return newMapCreateOperationIndexCount(_CDT_MAP_GET_BY_INDEX_RANGE, MAP_READ, binName, index, count, returnType) +} + +// MapGetByRankOp creates map get by rank operation. +// Server selects map item identified by rank and returns selected data specified by returnType. +func MapGetByRankOp(binName string, rank int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_RANK, MAP_READ, binName, rank, returnType) +} + +// MapGetByRankRangeOp creates map get by rank range operation. +// Server selects map items starting at specified rank to the last ranked item and returns selected +// data specified by returnType. +func MapGetByRankRangeOp(binName string, rank int, returnType mapReturnType) *Operation { + return newMapCreateOperationValue1(_CDT_MAP_GET_BY_RANK_RANGE, MAP_READ, binName, rank, returnType) +} + +// MapGetByRankRangeCountOp creates map get by rank range operation. +// Server selects "count" map items starting at specified rank and returns selected data specified by returnType. 
+func MapGetByRankRangeCountOp(binName string, rank int, count int, returnType mapReturnType) *Operation { + return newMapCreateOperationIndexCount(_CDT_MAP_GET_BY_RANK_RANGE, MAP_READ, binName, rank, count, returnType) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/client.go b/vendor/github.com/aerospike/aerospike-client-go/client.go new file mode 100644 index 00000000000..02844b7f31a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/client.go @@ -0,0 +1,1378 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "runtime" + "strconv" + "strings" + "sync" + "time" + + lualib "github.com/aerospike/aerospike-client-go/internal/lua" + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" + xornd "github.com/aerospike/aerospike-client-go/types/rand" + "github.com/yuin/gopher-lua" +) + +// Client encapsulates an Aerospike cluster. +// All database operations are available against this object. +type Client struct { + cluster *Cluster + + // DefaultPolicy is used for all read commands without a specific policy. + DefaultPolicy *BasePolicy + // DefaultWritePolicy is used for all write commands without a specific policy. + DefaultWritePolicy *WritePolicy + // DefaultScanPolicy is used for all scan commands without a specific policy. 
+ DefaultScanPolicy *ScanPolicy + // DefaultQueryPolicy is used for all query commands without a specific policy. + DefaultQueryPolicy *QueryPolicy + // DefaultAdminPolicy is used for all security commands without a specific policy. + DefaultAdminPolicy *AdminPolicy +} + +func clientFinalizer(f *Client) { + f.Close() +} + +//------------------------------------------------------- +// Constructors +//------------------------------------------------------- + +// NewClient generates a new Client instance. +func NewClient(hostname string, port int) (*Client, error) { + return NewClientWithPolicyAndHost(NewClientPolicy(), NewHost(hostname, port)) +} + +// NewClientWithPolicy generates a new Client using the specified ClientPolicy. +// If the policy is nil, the default relevant policy will be used. +func NewClientWithPolicy(policy *ClientPolicy, hostname string, port int) (*Client, error) { + return NewClientWithPolicyAndHost(policy, NewHost(hostname, port)) +} + +// NewClientWithPolicyAndHost generates a new Client the specified ClientPolicy and +// sets up the cluster using the provided hosts. +// If the policy is nil, the default relevant policy will be used. 
+func NewClientWithPolicyAndHost(policy *ClientPolicy, hosts ...*Host) (*Client, error) { + if policy == nil { + policy = NewClientPolicy() + } + + cluster, err := NewCluster(policy, hosts) + if err != nil && policy.FailIfNotConnected { + if aerr, ok := err.(AerospikeError); ok { + Logger.Debug("Failed to connect to host(s): %v; error: %s", hosts, err) + return nil, aerr + } + return nil, fmt.Errorf("Failed to connect to host(s): %v; error: %s", hosts, err) + } + + client := &Client{ + cluster: cluster, + DefaultPolicy: NewPolicy(), + DefaultWritePolicy: NewWritePolicy(0, 0), + DefaultScanPolicy: NewScanPolicy(), + DefaultQueryPolicy: NewQueryPolicy(), + DefaultAdminPolicy: NewAdminPolicy(), + } + + runtime.SetFinalizer(client, clientFinalizer) + return client, err + +} + +//------------------------------------------------------- +// Cluster Connection Management +//------------------------------------------------------- + +// Close closes all client connections to database server nodes. +func (clnt *Client) Close() { + clnt.cluster.Close() +} + +// IsConnected determines if the client is ready to talk to the database server cluster. +func (clnt *Client) IsConnected() bool { + return clnt.cluster.IsConnected() +} + +// GetNodes returns an array of active server nodes in the cluster. +func (clnt *Client) GetNodes() []*Node { + return clnt.cluster.GetNodes() +} + +// GetNodeNames returns a list of active server node names in the cluster. +func (clnt *Client) GetNodeNames() []string { + nodes := clnt.cluster.GetNodes() + names := make([]string, 0, len(nodes)) + + for _, node := range nodes { + names = append(names, node.GetName()) + } + return names +} + +//------------------------------------------------------- +// Write Record Operations +//------------------------------------------------------- + +// Put writes record bin(s) to the server. 
+// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Put(policy *WritePolicy, key *Key, binMap BinMap) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, nil, binMap, WRITE) + return command.Execute() +} + +// PutBins writes record bin(s) to the server. +// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// This method avoids using the BinMap allocation and iteration and is lighter on GC. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) PutBins(policy *WritePolicy, key *Key, bins ...*Bin) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, bins, nil, WRITE) + return command.Execute() +} + +//------------------------------------------------------- +// Operations string +//------------------------------------------------------- + +// Append appends bin value's string to existing record bin values. +// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// This call only works for string and []byte values. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Append(policy *WritePolicy, key *Key, binMap BinMap) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, nil, binMap, APPEND) + return command.Execute() +} + +// AppendBins works the same as Append, but avoids BinMap allocation and iteration. 
+func (clnt *Client) AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, bins, nil, APPEND) + return command.Execute() +} + +// Prepend prepends bin value's string to existing record bin values. +// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// This call works only for string and []byte values. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Prepend(policy *WritePolicy, key *Key, binMap BinMap) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, nil, binMap, PREPEND) + return command.Execute() +} + +// PrependBins works the same as Prepend, but avoids BinMap allocation and iteration. +func (clnt *Client) PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, bins, nil, PREPEND) + return command.Execute() +} + +//------------------------------------------------------- +// Arithmetic Operations +//------------------------------------------------------- + +// Add adds integer bin values to existing record bin values. +// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// This call only works for integer values. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Add(policy *WritePolicy, key *Key, binMap BinMap) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, nil, binMap, ADD) + return command.Execute() +} + +// AddBins works the same as Add, but avoids BinMap allocation and iteration. 
+func (clnt *Client) AddBins(policy *WritePolicy, key *Key, bins ...*Bin) error { + policy = clnt.getUsableWritePolicy(policy) + command := newWriteCommand(clnt.cluster, policy, key, bins, nil, ADD) + return command.Execute() +} + +//------------------------------------------------------- +// Delete Operations +//------------------------------------------------------- + +// Delete deletes a record for specified key. +// The policy specifies the transaction timeout. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Delete(policy *WritePolicy, key *Key) (bool, error) { + policy = clnt.getUsableWritePolicy(policy) + command := newDeleteCommand(clnt.cluster, policy, key) + err := command.Execute() + return command.Existed(), err +} + +//------------------------------------------------------- +// Touch Operations +//------------------------------------------------------- + +// Touch updates a record's metadata. +// If the record exists, the record's TTL will be reset to the +// policy's expiration. +// If the record doesn't exist, it will return an error. +func (clnt *Client) Touch(policy *WritePolicy, key *Key) error { + policy = clnt.getUsableWritePolicy(policy) + command := newTouchCommand(clnt.cluster, policy, key) + return command.Execute() +} + +//------------------------------------------------------- +// Existence-Check Operations +//------------------------------------------------------- + +// Exists determine if a record key exists. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Exists(policy *BasePolicy, key *Key) (bool, error) { + policy = clnt.getUsablePolicy(policy) + command := newExistsCommand(clnt.cluster, policy, key) + err := command.Execute() + return command.Exists(), err +} + +// BatchExists determines if multiple record keys exist in one batch request. 
+// The returned boolean array is in positional order with the original key array order. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) BatchExists(policy *BasePolicy, keys []*Key) ([]bool, error) { + policy = clnt.getUsablePolicy(policy) + + // same array can be used without synchronization; + // when a key exists, the corresponding index will be marked true + existsArray := make([]bool, len(keys)) + + if err := clnt.batchExecute(policy, keys, func(node *Node, bns *batchNamespace) command { + return newBatchCommandExists(node, bns, policy, keys, existsArray) + }); err != nil { + return nil, err + } + + return existsArray, nil +} + +//------------------------------------------------------- +// Read Record Operations +//------------------------------------------------------- + +// Get reads a record header and bins for specified key. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, error) { + policy = clnt.getUsablePolicy(policy) + + command := newReadCommand(clnt.cluster, policy, key, binNames) + if err := command.Execute(); err != nil { + return nil, err + } + return command.GetRecord(), nil +} + +// GetHeader reads a record generation and expiration only for specified key. +// Bins are not read. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) GetHeader(policy *BasePolicy, key *Key) (*Record, error) { + policy = clnt.getUsablePolicy(policy) + + command := newReadHeaderCommand(clnt.cluster, policy, key) + if err := command.Execute(); err != nil { + return nil, err + } + return command.GetRecord(), nil +} + +//------------------------------------------------------- +// Batch Read Operations +//------------------------------------------------------- + +// BatchGet reads multiple record headers and bins for specified keys in one batch request. +// The returned records are in positional order with the original key array order. +// If a key is not found, the positional record will be nil. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) BatchGet(policy *BasePolicy, keys []*Key, binNames ...string) ([]*Record, error) { + policy = clnt.getUsablePolicy(policy) + + // same array can be used without synchronization; + // when a key exists, the corresponding index will be set to record + records := make([]*Record, len(keys)) + + binSet := map[string]struct{}{} + for idx := range binNames { + binSet[binNames[idx]] = struct{}{} + } + + err := clnt.batchExecute(policy, keys, func(node *Node, bns *batchNamespace) command { + return newBatchCommandGet(node, bns, policy, keys, binSet, records, _INFO1_READ) + }) + if err != nil { + return nil, err + } + + return records, nil +} + +// BatchGetHeader reads multiple record header data for specified keys in one batch request. +// The returned records are in positional order with the original key array order. +// If a key is not found, the positional record will be nil. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) BatchGetHeader(policy *BasePolicy, keys []*Key) ([]*Record, error) { + policy = clnt.getUsablePolicy(policy) + + // same array can be used without synchronization; + // when a key exists, the corresponding index will be set to record + records := make([]*Record, len(keys)) + + err := clnt.batchExecute(policy, keys, func(node *Node, bns *batchNamespace) command { + return newBatchCommandGet(node, bns, policy, keys, nil, records, _INFO1_READ|_INFO1_NOBINDATA) + }) + if err != nil { + return nil, err + } + + return records, nil +} + +//------------------------------------------------------- +// Generic Database Operations +//------------------------------------------------------- + +// Operate performs multiple read/write operations on a single key in one batch request. +// An example would be to add an integer value to an existing record and then +// read the result, all in one database call. +// +// Write operations are always performed first, regardless of operation order +// relative to read operations. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, error) { + policy = clnt.getUsableWritePolicy(policy) + command := newOperateCommand(clnt.cluster, policy, key, operations) + if err := command.Execute(); err != nil { + return nil, err + } + return command.GetRecord(), nil +} + +//------------------------------------------------------- +// Scan Operations +//------------------------------------------------------- + +// ScanAll reads all records in specified namespace and set from all nodes. +// If the policy's concurrentNodes is specified, each server node will be read in +// parallel. Otherwise, server nodes are read sequentially. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, error) { + policy := *clnt.getUsableScanPolicy(apolicy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "Scan failed because cluster is empty.") + } + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // result recordset + taskId := uint64(xornd.Int64()) + res := newRecordset(policy.RecordQueueSize, len(nodes), taskId) + + // the whole call should be wrapped in a goroutine + if policy.ConcurrentNodes { + for _, node := range nodes { + go func(node *Node) { + clnt.scanNode(&policy, node, res, namespace, setName, taskId, binNames...) + }(node) + } + } else { + // scan nodes one by one + go func() { + for _, node := range nodes { + clnt.scanNode(&policy, node, res, namespace, setName, taskId, binNames...) + } + }() + } + + return res, nil +} + +// ScanNode reads all records in specified namespace and set for one node only. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, error) { + policy := *clnt.getUsableScanPolicy(apolicy) + + // results channel must be async for performance + taskId := uint64(xornd.Int64()) + res := newRecordset(policy.RecordQueueSize, 1, taskId) + + go clnt.scanNode(&policy, node, res, namespace, setName, taskId, binNames...) + return res, nil +} + +// ScanNode reads all records in specified namespace and set for one node only. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) scanNode(policy *ScanPolicy, node *Node, recordset *Recordset, namespace string, setName string, taskId uint64, binNames ...string) error { + if policy.WaitUntilMigrationsAreOver { + // wait until migrations on node are finished + if err := node.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + recordset.signalEnd() + return err + } + } + + command := newScanCommand(node, policy, namespace, setName, binNames, recordset, taskId) + return command.Execute() +} + +//------------------------------------------------------------------- +// Large collection functions (Supported by Aerospike 3 servers only) +//------------------------------------------------------------------- + +// GetLargeList initializes large list operator. +// This operator can be used to create and manage a list +// within a single bin. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +// NOTICE: DEPRECATED ON SERVER. Will be removed in future. Use CDT operations instead. +func (clnt *Client) GetLargeList(policy *WritePolicy, key *Key, binName string, userModule string) *LargeList { + policy = clnt.getUsableWritePolicy(policy) + return NewLargeList(clnt, policy, key, binName, userModule) +} + +// GetLargeMap initializes a large map operator. +// This operator can be used to create and manage a map +// within a single bin. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +// NOTICE: DEPRECATED ON SERVER. Will be removed in future. Use CDT operations instead. +func (clnt *Client) GetLargeMap(policy *WritePolicy, key *Key, binName string, userModule string) *LargeMap { + policy = clnt.getUsableWritePolicy(policy) + return NewLargeMap(clnt, policy, key, binName, userModule) +} + +// GetLargeSet initializes large set operator. +// This operator can be used to create and manage a set +// within a single bin. 
+// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +// NOTICE: DEPRECATED ON SERVER. Will be removed in future. +func (clnt *Client) GetLargeSet(policy *WritePolicy, key *Key, binName string, userModule string) *LargeSet { + policy = clnt.getUsableWritePolicy(policy) + return NewLargeSet(clnt, policy, key, binName, userModule) +} + +// GetLargeStack initializes large stack operator. +// This operator can be used to create and manage a stack +// within a single bin. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +// NOTICE: DEPRECATED ON SERVER. Will be removed in future. +func (clnt *Client) GetLargeStack(policy *WritePolicy, key *Key, binName string, userModule string) *LargeStack { + policy = clnt.getUsableWritePolicy(policy) + return NewLargeStack(clnt, policy, key, binName, userModule) +} + +//--------------------------------------------------------------- +// User defined functions (Supported by Aerospike 3 servers only) +//--------------------------------------------------------------- + +// RegisterUDFFromFile reads a file from file system and registers +// the containing a package user defined functions with the server. +// This asynchronous server call will return before command is complete. +// The user can optionally wait for command completion by using the returned +// RegisterTask instance. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, error) { + policy = clnt.getUsableWritePolicy(policy) + udfBody, err := ioutil.ReadFile(clientPath) + if err != nil { + return nil, err + } + + return clnt.RegisterUDF(policy, udfBody, serverPath, language) +} + +// RegisterUDF registers a package containing user defined functions with server. +// This asynchronous server call will return before command is complete. +// The user can optionally wait for command completion by using the returned +// RegisterTask instance. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, error) { + policy = clnt.getUsableWritePolicy(policy) + content := base64.StdEncoding.EncodeToString(udfBody) + + var strCmd bytes.Buffer + // errors are to remove errcheck warnings + // they will always be nil as stated in golang docs + _, err := strCmd.WriteString("udf-put:filename=") + _, err = strCmd.WriteString(serverPath) + _, err = strCmd.WriteString(";content=") + _, err = strCmd.WriteString(content) + _, err = strCmd.WriteString(";content-len=") + _, err = strCmd.WriteString(strconv.Itoa(len(content))) + _, err = strCmd.WriteString(";udf-type=") + _, err = strCmd.WriteString(string(language)) + _, err = strCmd.WriteString(";") + + // Send UDF to one node. That node will distribute the UDF to other nodes. 
+ responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return nil, err + } + + response := responseMap[strCmd.String()] + res := make(map[string]string) + vals := strings.Split(response, ";") + for _, pair := range vals { + t := strings.SplitN(pair, "=", 2) + if len(t) == 2 { + res[t[0]] = t[1] + } else if len(t) == 1 { + res[t[0]] = "" + } + } + + if _, exists := res["error"]; exists { + msg, _ := base64.StdEncoding.DecodeString(res["message"]) + return nil, NewAerospikeError(COMMAND_REJECTED, fmt.Sprintf("Registration failed: %s\nFile: %s\nLine: %s\nMessage: %s", + res["error"], res["file"], res["line"], msg)) + } + return NewRegisterTask(clnt.cluster, serverPath), nil +} + +// RemoveUDF removes a package containing user defined functions in the server. +// This asynchronous server call will return before command is complete. +// The user can optionally wait for command completion by using the returned +// RemoveTask instance. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, error) { + policy = clnt.getUsableWritePolicy(policy) + var strCmd bytes.Buffer + // errors are to remove errcheck warnings + // they will always be nil as stated in golang docs + _, err := strCmd.WriteString("udf-remove:filename=") + _, err = strCmd.WriteString(udfName) + _, err = strCmd.WriteString(";") + + // Send command to one node. That node will distribute it to other nodes. + responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return nil, err + } + + response := responseMap[strCmd.String()] + if response == "ok" { + return NewRemoveTask(clnt.cluster, udfName), nil + } + return nil, NewAerospikeError(SERVER_ERROR, response) +} + +// ListUDF lists all packages containing user defined functions in the server. 
+// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) ListUDF(policy *BasePolicy) ([]*UDF, error) { + policy = clnt.getUsablePolicy(policy) + + var strCmd bytes.Buffer + // errors are to remove errcheck warnings + // they will always be nil as stated in golang docs + _, err := strCmd.WriteString("udf-list") + + // Send command to one node. That node will distribute it to other nodes. + responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return nil, err + } + + response := responseMap[strCmd.String()] + vals := strings.Split(response, ";") + res := make([]*UDF, 0, len(vals)) + + for _, udfInfo := range vals { + if strings.Trim(udfInfo, " ") == "" { + continue + } + udfParts := strings.Split(udfInfo, ",") + + udf := &UDF{} + for _, values := range udfParts { + valueParts := strings.Split(values, "=") + if len(valueParts) == 2 { + switch valueParts[0] { + case "filename": + udf.Filename = valueParts[1] + case "hash": + udf.Hash = valueParts[1] + case "type": + udf.Language = Language(valueParts[1]) + } + } + } + res = append(res, udf) + } + + return res, nil +} + +// Execute executes a user defined function on server and return results. +// The function operates on a single record. +// The package name is used to locate the udf file location: +// +// udf file = /.lua +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, error) { + policy = clnt.getUsableWritePolicy(policy) + command := newExecuteCommand(clnt.cluster, policy, key, packageName, functionName, NewValueArray(args)) + if err := command.Execute(); err != nil { + return nil, err + } + + record := command.GetRecord() + + if record == nil || len(record.Bins) == 0 { + return nil, nil + } + + resultMap := record.Bins + + // User defined functions don't have to return a value. + if exists, obj := mapContainsKeyPartial(resultMap, "SUCCESS"); exists { + return obj, nil + } + + if _, obj := mapContainsKeyPartial(resultMap, "FAILURE"); obj != nil { + return nil, fmt.Errorf("%v", obj) + } + + return nil, NewAerospikeError(UDF_BAD_RESPONSE, "Invalid UDF return value") +} + +func mapContainsKeyPartial(theMap map[string]interface{}, key string) (bool, interface{}) { + for k, v := range theMap { + if strings.Contains(k, key) { + return true, v + } + } + return false, nil +} + +//---------------------------------------------------------- +// Query/Execute UDF (Supported by Aerospike 3 servers only) +//---------------------------------------------------------- + +// ExecuteUDF applies user defined function on records that match the statement filter. +// Records are not returned to the client. +// This asynchronous server call will return before command is complete. +// The user can optionally wait for command completion by using the returned +// ExecuteTask instance. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) ExecuteUDF(policy *QueryPolicy, + statement *Statement, + packageName string, + functionName string, + functionArgs ...Value, +) (*ExecuteTask, error) { + policy = clnt.getUsableQueryPolicy(policy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "ExecuteUDF failed because cluster is empty.") + } + + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + + statement.SetAggregateFunction(packageName, functionName, functionArgs, false) + + errs := []error{} + for i := range nodes { + command := newServerCommand(nodes[i], policy, statement) + if err := command.Execute(); err != nil { + errs = append(errs, err) + } + } + + return NewExecuteTask(clnt.cluster, statement), mergeErrors(errs) +} + +//-------------------------------------------------------- +// Query Aggregate functions (Supported by Aerospike 3 servers only) +//-------------------------------------------------------- + +// SetLuaPath sets the Lua interpreter path to files +// This path is used to load UDFs for QueryAggregate command +func SetLuaPath(lpath string) { + lualib.SetPath(lpath) +} + +// QueryAggregate executes a Map/Reduce query and returns the results. +// The query executor puts records on the channel from separate goroutines. +// The caller can concurrently pop records off the channel through the +// Recordset.Records channel. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...interface{}) (*Recordset, error) { + statement.SetAggregateFunction(packageName, functionName, ToValueSlice(functionArgs), true) + + policy = clnt.getUsableQueryPolicy(policy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "QueryAggregate failed because cluster is empty.") + } + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // results channel must be async for performance + recSet := newRecordset(policy.RecordQueueSize, len(nodes), statement.TaskId) + + // get a lua instance + luaInstance := lualib.LuaPool.Get().(*lua.LState) + if luaInstance == nil { + return nil, fmt.Errorf("Error fetching a lua instance from pool") + } + + // Input Channel + inputChan := make(chan interface{}, 4096) // 4096 = number of partitions + istream := lualib.NewLuaStream(luaInstance, inputChan) + + // Output Channe; + outputChan := make(chan interface{}) + ostream := lualib.NewLuaStream(luaInstance, outputChan) + + // results channel must be async for performance + var wg sync.WaitGroup + wg.Add(len(nodes)) + for _, node := range nodes { + // copy policies to avoid race conditions + newPolicy := *policy + command := newQueryAggregateCommand(node, &newPolicy, statement, recSet) + command.luaInstance = luaInstance + command.inputChan = inputChan + + go func() { + defer wg.Done() + command.Execute() + }() + } + + go func() { + wg.Wait() + close(inputChan) + }() + + go func() { + // we cannot signal end and close the recordset + // while processing is still going on + // We will do it only here, after all processing is over + defer func() { + for i := 0; i < len(nodes); i++ { + recSet.signalEnd() + } + }() + + for val := range outputChan { + 
recSet.Records <- &Record{Bins: BinMap{"SUCCESS": val}} + } + }() + + go func() { + defer close(outputChan) + defer luaInstance.Close() + + err := luaInstance.DoFile(lualib.LuaPath() + packageName + ".lua") + if err != nil { + recSet.Errors <- err + return + } + + fn := luaInstance.GetGlobal(functionName) + + luaArgs := []lua.LValue{fn, lualib.NewValue(luaInstance, 2), istream, ostream} + for _, a := range functionArgs { + luaArgs = append(luaArgs, lualib.NewValue(luaInstance, unwrapValue(a))) + } + + if err := luaInstance.CallByParam(lua.P{ + Fn: luaInstance.GetGlobal("apply_stream"), + NRet: 1, + Protect: true, + }, + luaArgs..., + ); err != nil { + recSet.Errors <- err + return + } + + luaInstance.Get(-1) // returned value + luaInstance.Pop(1) // remove received value + }() + + return recSet, nil +} + +//-------------------------------------------------------- +// Query functions (Supported by Aerospike 3 servers only) +//-------------------------------------------------------- + +// Query executes a query and returns a Recordset. +// The query executor puts records on the channel from separate goroutines. +// The caller can concurrently pop records off the channel through the +// Recordset.Records channel. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) Query(policy *QueryPolicy, statement *Statement) (*Recordset, error) { + policy = clnt.getUsableQueryPolicy(policy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "Query failed because cluster is empty.") + } + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // results channel must be async for performance + recSet := newRecordset(policy.RecordQueueSize, len(nodes), statement.TaskId) + + // results channel must be async for performance + for _, node := range nodes { + // copy policies to avoid race conditions + newPolicy := *policy + command := newQueryRecordCommand(node, &newPolicy, statement, recSet) + go func() { + command.Execute() + }() + } + + return recSet, nil +} + +// QueryNode executes a query on a specific node and returns a recordset. +// The caller can concurrently pop records off the channel through the +// record channel. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, error) { + policy = clnt.getUsableQueryPolicy(policy) + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // results channel must be async for performance + recSet := newRecordset(policy.RecordQueueSize, 1, statement.TaskId) + + // copy policies to avoid race conditions + newPolicy := *policy + command := newQueryRecordCommand(node, &newPolicy, statement, recSet) + go func() { + command.Execute() + }() + + return recSet, nil +} + +// CreateIndex creates a secondary index. 
+// This asynchronous server call will return before the command is complete. +// The user can optionally wait for command completion by using the returned +// IndexTask instance. +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) CreateIndex( + policy *WritePolicy, + namespace string, + setName string, + indexName string, + binName string, + indexType IndexType, +) (*IndexTask, error) { + policy = clnt.getUsableWritePolicy(policy) + return clnt.CreateComplexIndex(policy, namespace, setName, indexName, binName, indexType, ICT_DEFAULT) +} + +// CreateComplexIndex creates a secondary index, with the ability to put indexes +// on bin containing complex data types, e.g: Maps and Lists. +// This asynchronous server call will return before the command is complete. +// The user can optionally wait for command completion by using the returned +// IndexTask instance. +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) CreateComplexIndex( + policy *WritePolicy, + namespace string, + setName string, + indexName string, + binName string, + indexType IndexType, + indexCollectionType IndexCollectionType, +) (*IndexTask, error) { + policy = clnt.getUsableWritePolicy(policy) + + var strCmd bytes.Buffer + _, err := strCmd.WriteString("sindex-create:ns=") + _, err = strCmd.WriteString(namespace) + + if len(setName) > 0 { + _, err = strCmd.WriteString(";set=") + _, err = strCmd.WriteString(setName) + } + + _, err = strCmd.WriteString(";indexname=") + _, err = strCmd.WriteString(indexName) + _, err = strCmd.WriteString(";numbins=1") + + if indexCollectionType != ICT_DEFAULT { + _, err = strCmd.WriteString(";indextype=") + _, err = strCmd.WriteString(ictToString(indexCollectionType)) + } + + _, err = strCmd.WriteString(";indexdata=") + _, err = strCmd.WriteString(binName) + _, err = strCmd.WriteString(",") + _, err = strCmd.WriteString(string(indexType)) + _, err = strCmd.WriteString(";priority=normal") + + // Send index command to one node. That node will distribute the command to other nodes. + responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return nil, err + } + + response := responseMap[strCmd.String()] + if strings.ToUpper(response) == "OK" { + // Return task that could optionally be polled for completion. + return NewIndexTask(clnt.cluster, namespace, indexName), nil + } + + if strings.HasPrefix(response, "FAIL:200") { + // Index has already been created. Do not need to poll for completion. + return nil, NewAerospikeError(INDEX_FOUND) + } + + return nil, NewAerospikeError(INDEX_GENERIC, "Create index failed: "+response) +} + +// DropIndex deletes a secondary index. It will block until index is dropped on all nodes. +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) DropIndex( + policy *WritePolicy, + namespace string, + setName string, + indexName string, +) error { + policy = clnt.getUsableWritePolicy(policy) + var strCmd bytes.Buffer + _, err := strCmd.WriteString("sindex-delete:ns=") + _, err = strCmd.WriteString(namespace) + + if len(setName) > 0 { + _, err = strCmd.WriteString(";set=") + _, err = strCmd.WriteString(setName) + } + _, err = strCmd.WriteString(";indexname=") + _, err = strCmd.WriteString(indexName) + + // Send index command to one node. That node will distribute the command to other nodes. + responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return err + } + + response := responseMap[strCmd.String()] + + if strings.ToUpper(response) == "OK" { + // Return task that could optionally be polled for completion. + task := NewDropIndexTask(clnt.cluster, namespace, indexName) + return <-task.OnComplete() + } + + if strings.HasPrefix(response, "FAIL:201") { + // Index did not previously exist. Return without error. + return nil + } + + return NewAerospikeError(INDEX_GENERIC, "Drop index failed: "+response) +} + +// Remove records in specified namespace/set efficiently. This method is many orders of magnitude +// faster than deleting records one at a time. Works with Aerospike Server versions >= 3.12. +// This asynchronous server call may return before the truncation is complete. The user can still +// write new records after the server call returns because new records will have last update times +// greater than the truncate cutoff (set at the time of truncate call). 
+func (clnt *Client) Truncate(policy *WritePolicy, namespace, set string, beforeLastUpdate *time.Time) error { + policy = clnt.getUsableWritePolicy(policy) + + var strCmd bytes.Buffer + _, err := strCmd.WriteString("truncate:namespace=") + _, err = strCmd.WriteString(namespace) + + if len(set) > 0 { + _, err = strCmd.WriteString(";set=") + _, err = strCmd.WriteString(set) + } + if beforeLastUpdate != nil { + _, err = strCmd.WriteString(";lut=") + _, err = strCmd.WriteString(strconv.FormatInt(beforeLastUpdate.UnixNano(), 10)) + } + + // Send index command to one node. That node will distribute the command to other nodes. + responseMap, err := clnt.sendInfoCommand(policy.Timeout, strCmd.String()) + if err != nil { + return err + } + + response := responseMap[strCmd.String()] + if strings.ToUpper(response) == "OK" { + return nil + } + + return NewAerospikeError(SERVER_ERROR, "Truncate failed: "+response) +} + +//------------------------------------------------------- +// User administration +//------------------------------------------------------- + +// CreateUser creates a new user with password and roles. Clear-text password will be hashed using bcrypt +// before sending to server. +func (clnt *Client) CreateUser(policy *AdminPolicy, user string, password string, roles []string) error { + policy = clnt.getUsableAdminPolicy(policy) + + hash, err := hashPassword(password) + if err != nil { + return err + } + command := newAdminCommand(nil) + return command.createUser(clnt.cluster, policy, user, hash, roles) +} + +// DropUser removes a user from the cluster. +func (clnt *Client) DropUser(policy *AdminPolicy, user string) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.dropUser(clnt.cluster, policy, user) +} + +// ChangePassword changes a user's password. Clear-text password will be hashed using bcrypt before sending to server. 
+func (clnt *Client) ChangePassword(policy *AdminPolicy, user string, password string) error { + policy = clnt.getUsableAdminPolicy(policy) + + if clnt.cluster.user == "" { + return NewAerospikeError(INVALID_USER) + } + + hash, err := hashPassword(password) + if err != nil { + return err + } + command := newAdminCommand(nil) + + if user == clnt.cluster.user { + // Change own password. + if err := command.changePassword(clnt.cluster, policy, user, hash); err != nil { + return err + } + } else { + // Change other user's password by user admin. + if err := command.setPassword(clnt.cluster, policy, user, hash); err != nil { + return err + } + } + + clnt.cluster.changePassword(user, password, hash) + + return nil +} + +// GrantRoles adds roles to user's list of roles. +func (clnt *Client) GrantRoles(policy *AdminPolicy, user string, roles []string) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.grantRoles(clnt.cluster, policy, user, roles) +} + +// RevokeRoles removes roles from user's list of roles. +func (clnt *Client) RevokeRoles(policy *AdminPolicy, user string, roles []string) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.revokeRoles(clnt.cluster, policy, user, roles) +} + +// QueryUser retrieves roles for a given user. +func (clnt *Client) QueryUser(policy *AdminPolicy, user string) (*UserRoles, error) { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.queryUser(clnt.cluster, policy, user) +} + +// QueryUsers retrieves all users and their roles. +func (clnt *Client) QueryUsers(policy *AdminPolicy) ([]*UserRoles, error) { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.queryUsers(clnt.cluster, policy) +} + +// QueryRole retrieves privileges for a given role. 
+func (clnt *Client) QueryRole(policy *AdminPolicy, role string) (*Role, error) { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.queryRole(clnt.cluster, policy, role) +} + +// QueryRoles retrieves all roles and their privileges. +func (clnt *Client) QueryRoles(policy *AdminPolicy) ([]*Role, error) { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.queryRoles(clnt.cluster, policy) +} + +// CreateRole creates a user-defined role. +func (clnt *Client) CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.createRole(clnt.cluster, policy, roleName, privileges) +} + +// DropRole removes a user-defined role. +func (clnt *Client) DropRole(policy *AdminPolicy, roleName string) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.dropRole(clnt.cluster, policy, roleName) +} + +// GrantPrivileges grant privileges to a user-defined role. +func (clnt *Client) GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.grantPrivileges(clnt.cluster, policy, roleName, privileges) +} + +// RevokePrivileges revokes privileges from a user-defined role. 
+func (clnt *Client) RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) error { + policy = clnt.getUsableAdminPolicy(policy) + + command := newAdminCommand(nil) + return command.revokePrivileges(clnt.cluster, policy, roleName, privileges) +} + +// Cluster exposes the cluster object to the user +func (clnt *Client) Cluster() *Cluster { + return clnt.cluster +} + +// String implements the Stringer interface for client +func (clnt *Client) String() string { + if clnt.cluster != nil { + return clnt.cluster.String() + } + return "" +} + +//------------------------------------------------------- +// Internal Methods +//------------------------------------------------------- + +func (clnt *Client) sendInfoCommand(timeout time.Duration, command string) (map[string]string, error) { + node, err := clnt.cluster.GetRandomNode() + if err != nil { + return nil, err + } + + node.tendConnLock.Lock() + defer node.tendConnLock.Unlock() + + if err := node.initTendConn(timeout); err != nil { + return nil, err + } + + results, err := RequestInfo(node.tendConn, command) + if err != nil { + node.tendConn.Close() + return nil, err + } + + return results, nil +} + +// batchExecute Uses sync.WaitGroup to run commands using multiple goroutines, +// and waits for their return +func (clnt *Client) batchExecute(policy *BasePolicy, keys []*Key, cmdGen func(node *Node, bns *batchNamespace) command) error { + + batchNodes, err := newBatchNodeList(clnt.cluster, policy, keys) + if err != nil { + return err + } + + var wg sync.WaitGroup + + // Use a goroutine per namespace per node + errs := []error{} + errm := new(sync.Mutex) + + wg.Add(len(batchNodes)) + for _, batchNode := range batchNodes { + // copy to avoid race condition + bn := *batchNode + for _, bns := range bn.BatchNamespaces { + go func(bn *Node, bns *batchNamespace) { + defer wg.Done() + command := cmdGen(bn, bns) + if err := command.Execute(); err != nil { + errm.Lock() + errs = append(errs, err) + 
errm.Unlock() + } + }(bn.Node, bns) + } + } + + wg.Wait() + return mergeErrors(errs) +} + +func (clnt *Client) getUsablePolicy(policy *BasePolicy) *BasePolicy { + if policy == nil { + if clnt.DefaultPolicy != nil { + return clnt.DefaultPolicy + } + return NewPolicy() + } + return policy +} + +func (clnt *Client) getUsableWritePolicy(policy *WritePolicy) *WritePolicy { + if policy == nil { + if clnt.DefaultWritePolicy != nil { + return clnt.DefaultWritePolicy + } + return NewWritePolicy(0, 0) + } + return policy +} + +func (clnt *Client) getUsableScanPolicy(policy *ScanPolicy) *ScanPolicy { + if policy == nil { + if clnt.DefaultScanPolicy != nil { + return clnt.DefaultScanPolicy + } + return NewScanPolicy() + } + return policy +} + +func (clnt *Client) getUsableQueryPolicy(policy *QueryPolicy) *QueryPolicy { + if policy == nil { + if clnt.DefaultQueryPolicy != nil { + return clnt.DefaultQueryPolicy + } + return NewQueryPolicy() + } + return policy +} + +func (clnt *Client) getUsableAdminPolicy(policy *AdminPolicy) *AdminPolicy { + if policy == nil { + if clnt.DefaultAdminPolicy != nil { + return clnt.DefaultAdminPolicy + } + return NewAdminPolicy() + } + return policy +} + +//------------------------------------------------------- +// Utility Functions +//------------------------------------------------------- + +// mergeErrors merges several errors into one +func mergeErrors(errs []error) error { + if len(errs) == 0 { + return nil + } + var msg bytes.Buffer + for _, err := range errs { + if _, err := msg.WriteString(err.Error()); err != nil { + return err + } + if _, err := msg.WriteString("\n"); err != nil { + return err + } + } + return errors.New(msg.String()) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/client_policy.go b/vendor/github.com/aerospike/aerospike-client-go/client_policy.go new file mode 100644 index 00000000000..1863bae548d --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/client_policy.go @@ -0,0 +1,118 @@ +// 
Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "crypto/tls" + "time" +) + +const defaultIdleTimeout = 14 * time.Second + +// ClientPolicy encapsulates parameters for client policy command. +type ClientPolicy struct { + // User authentication to cluster. Leave empty for clusters running without restricted access. + User string + + // Password authentication to cluster. The password will be stored by the client and sent to server + // in hashed format. Leave empty for clusters running without restricted access. + Password string + + // ClusterName sets the expected cluster ID. If not null, server nodes must return this cluster ID in order to + // join the client's view of the cluster. Should only be set when connecting to servers that + // support the "cluster-name" info command. (v3.10+) + ClusterName string //="" + + // Initial host connection timeout duration. The timeout when opening a connection + // to the server host for the first time. + Timeout time.Duration //= 30 seconds + + // Connection idle timeout. Every time a connection is used, its idle + // deadline will be extended by this duration. When this deadline is reached, + // the connection will be closed and discarded from the connection pool. + IdleTimeout time.Duration //= 14 seconds + + // ConnectionQueueCache specifies the size of the Connection Queue cache PER NODE. 
+ ConnectionQueueSize int //= 256 + + // If set to true, will not create a new connection + // to the node if there are already `ConnectionQueueSize` active connections. + LimitConnectionsToQueueSize bool //= true + + // Throw exception if host connection fails during addHost(). + FailIfNotConnected bool //= true + + // TendInterval determines interval for checking for cluster state changes. + // Minimum possible interval is 10 Milliseconds. + TendInterval time.Duration //= 1 second + + // A IP translation table is used in cases where different clients + // use different server IP addresses. This may be necessary when + // using clients from both inside and outside a local area + // network. Default is no translation. + // The key is the IP address returned from friend info requests to other servers. + // The value is the real IP address used to connect to the server. + IpMap map[string]string + + // UseServicesAlternate determines if the client should use "services-alternate" instead of "services" + // in info request during cluster tending. + //"services-alternate" returns server configured external IP addresses that client + // uses to talk to nodes. "services-alternate" can be used in place of providing a client "ipMap". + // This feature is recommended instead of using the client-side IpMap above. + // + // "services-alternate" is available with Aerospike Server versions >= 3.7.1. + UseServicesAlternate bool // false + + // RequestProleReplicas determines if prole replicas should be requested from each server node in the cluster tend goroutine. + // This option is required if there is a need to distribute reads across proles. + // If RequestProleReplicas is enabled, all prole partition maps will be cached on the client which results in + // extra storage multiplied by the replication factor. + // The default is false (only request master replicas and never prole replicas). 
+ RequestProleReplicas bool // false + + // TlsConfig specifies TLS secure connection policy for TLS enabled servers. + // For better performance, we suggest prefering the server-side ciphers by + // setting PreferServerCipherSuites = true. + TlsConfig *tls.Config //= nil + + // IgnoreOtherSubnetAliases helps to ignore aliases that are outside main subnet + IgnoreOtherSubnetAliases bool //= false +} + +// NewClientPolicy generates a new ClientPolicy with default values. +func NewClientPolicy() *ClientPolicy { + return &ClientPolicy{ + Timeout: 30 * time.Second, + IdleTimeout: defaultIdleTimeout, + ConnectionQueueSize: 256, + FailIfNotConnected: true, + TendInterval: time.Second, + LimitConnectionsToQueueSize: true, + RequestProleReplicas: false, + IgnoreOtherSubnetAliases: false, + } +} + +// RequiresAuthentication returns true if a USer or Password is set for ClientPolicy. +func (cp *ClientPolicy) RequiresAuthentication() bool { + return (cp.User != "") || (cp.Password != "") +} + +func (cp *ClientPolicy) serviceString() string { + if cp.UseServicesAlternate { + return "services-alternate" + } + return "services" +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/client_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/client_reflect.go new file mode 100644 index 00000000000..2049eefbe22 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/client_reflect.go @@ -0,0 +1,241 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "errors" + "reflect" + + . "github.com/aerospike/aerospike-client-go/types" + xornd "github.com/aerospike/aerospike-client-go/types/rand" +) + +// PutObject writes record bin(s) to the server. +// The policy specifies the transaction timeout, record expiration and how the transaction is +// handled when the record already exists. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) PutObject(policy *WritePolicy, key *Key, obj interface{}) (err error) { + policy = clnt.getUsableWritePolicy(policy) + + bins := marshal(obj, clnt.cluster.supportsFloat.Get()) + command := newWriteCommand(clnt.cluster, policy, key, bins, nil, WRITE) + res := command.Execute() + binPool.Put(bins) + return res +} + +// GetObject reads a record for specified key and puts the result into the provided object. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) GetObject(policy *BasePolicy, key *Key, obj interface{}) error { + policy = clnt.getUsablePolicy(policy) + + rval := reflect.ValueOf(obj) + binNames := objectMappings.getFields(rval.Type()) + + command := newReadCommand(clnt.cluster, policy, key, binNames) + command.object = &rval + return command.Execute() +} + +// BatchGetObject reads multiple record headers and bins for specified keys in one batch request. +// The returned objects are in positional order with the original key array order. +// If a key is not found, the positional object will not change, and the positional found boolean will be false. +// The policy can be used to specify timeouts. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) BatchGetObjects(policy *BasePolicy, keys []*Key, objects []interface{}) (found []bool, err error) { + policy = clnt.getUsablePolicy(policy) + + // check the size of key and objects + if (len(keys) != len(objects)) || (len(keys) == 0) { + return nil, errors.New("Wrong Number of arguments to BatchGetObject. Number of keys and objects do not match.") + } + + binSet := map[string]struct{}{} + objectsVal := make([]*reflect.Value, len(objects)) + for i := range objects { + rval := reflect.ValueOf(objects[i]) + objectsVal[i] = &rval + for _, bn := range objectMappings.getFields(rval.Type()) { + binSet[bn] = struct{}{} + } + } + + objectsFound := make([]bool, len(keys)) + err = clnt.batchExecute(policy, keys, func(node *Node, bns *batchNamespace) command { + cmd := newBatchCommandGet(node, bns, policy, keys, binSet, nil, _INFO1_READ) + cmd.objects = objectsVal + cmd.objectsFound = objectsFound + return cmd + }) + if err != nil { + return nil, err + } + + return objectsFound, nil +} + +// ScanAllObjects reads all records in specified namespace and set from all nodes. +// If the policy's concurrentNodes is specified, each server node will be read in +// parallel. Otherwise, server nodes are read sequentially. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, error) { + policy := *clnt.getUsableScanPolicy(apolicy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "Scan failed because cluster is empty.") + } + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // result recordset + taskId := uint64(xornd.Int64()) + res := &Recordset{ + objectset: *newObjectset(reflect.ValueOf(objChan), len(nodes), taskId), + } + + // the whole call should be wrapped in a goroutine + if policy.ConcurrentNodes { + for _, node := range nodes { + go func(node *Node) { + // Errors are handled inside the command itself + clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...) + }(node) + } + } else { + // scan nodes one by one + go func() { + for _, node := range nodes { + // Errors are handled inside the command itself + clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...) + } + }() + } + + return res, nil +} + +// scanNodeObjects reads all records in specified namespace and set for one node only, +// and marshalls the results into the objects of the provided channel in Recordset. +// If the policy is nil, the default relevant policy will be used. +// The resulting records will be marshalled into the objChan. +// objChan will be closed after all the records are read. 
+func (clnt *Client) ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, error) { + policy := *clnt.getUsableScanPolicy(apolicy) + + // results channel must be async for performance + taskId := uint64(xornd.Int64()) + res := &Recordset{ + objectset: *newObjectset(reflect.ValueOf(objChan), 1, taskId), + } + + go clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...) + return res, nil +} + +// scanNodeObjects reads all records in specified namespace and set for one node only, +// and marshalls the results into the objects of the provided channel in Recordset. +// If the policy is nil, the default relevant policy will be used. +func (clnt *Client) scanNodeObjects(policy *ScanPolicy, node *Node, recordset *Recordset, namespace string, setName string, taskId uint64, binNames ...string) error { + if policy.WaitUntilMigrationsAreOver { + // wait until migrations on node are finished + if err := node.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + recordset.signalEnd() + return err + } + } + + command := newScanObjectsCommand(node, policy, namespace, setName, binNames, recordset, taskId) + return command.Execute() +} + +// QueryNodeObjects executes a query on all nodes in the cluster and marshals the records into the given channel. +// The query executor puts records on the channel from separate goroutines. +// The caller can concurrently pop objects. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, error) { + policy = clnt.getUsableQueryPolicy(policy) + + nodes := clnt.cluster.GetNodes() + if len(nodes) == 0 { + return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "Query failed because cluster is empty.") + } + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // results channel must be async for performance + recSet := &Recordset{ + objectset: *newObjectset(reflect.ValueOf(objChan), len(nodes), statement.TaskId), + } + + // the whole call sho + // results channel must be async for performance + for _, node := range nodes { + // copy policies to avoid race conditions + newPolicy := *policy + command := newQueryObjectsCommand(node, &newPolicy, statement, recSet) + go func() { + // Do not send the error to the channel; it is already handled in the Execute method + command.Execute() + }() + } + + return recSet, nil +} + +// QueryNodeObjects executes a query on a specific node and marshals the records into the given channel. +// The caller can concurrently pop records off the channel. +// +// This method is only supported by Aerospike 3 servers. +// If the policy is nil, the default relevant policy will be used. 
+func (clnt *Client) QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, error) { + policy = clnt.getUsableQueryPolicy(policy) + + if policy.WaitUntilMigrationsAreOver { + // wait until all migrations are finished + if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil { + return nil, err + } + } + + // results channel must be async for performance + recSet := &Recordset{ + objectset: *newObjectset(reflect.ValueOf(objChan), 1, statement.TaskId), + } + + // copy policies to avoid race conditions + newPolicy := *policy + command := newQueryRecordCommand(node, &newPolicy, statement, recSet) + go func() { + command.Execute() + }() + + return recSet, nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/cluster.go b/vendor/github.com/aerospike/aerospike-client-go/cluster.go new file mode 100644 index 00000000000..92733922331 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/cluster.go @@ -0,0 +1,989 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + . "github.com/aerospike/aerospike-client-go/logger" + + . "github.com/aerospike/aerospike-client-go/types" + . 
"github.com/aerospike/aerospike-client-go/types/atomic" +) + +type partitionMap map[string][][]*Node + +// String implements stringer interface for partitionMap +func (pm partitionMap) clone() partitionMap { + // Make shallow copy of map. + pmap := make(partitionMap, len(pm)) + for ns, replArr := range pm { + newReplArr := make([][]*Node, len(replArr)) + for i, nArr := range replArr { + newNArr := make([]*Node, len(nArr)) + copy(newNArr, nArr) + newReplArr[i] = newNArr + } + pmap[ns] = newReplArr + } + return pmap +} + +// String implements stringer interface for partitionMap +func (pm partitionMap) merge(partMap partitionMap) { + // merge partitions; iterate over the new partition and update the old one + for ns, replicaArray := range partMap { + if pm[ns] == nil { + pm[ns] = make([][]*Node, len(replicaArray)) + } + + for i, nodeArray := range replicaArray { + if pm[ns][i] == nil { + pm[ns][i] = make([]*Node, len(nodeArray)) + } + + for j, node := range nodeArray { + if node != nil { + pm[ns][i][j] = node + } + } + } + } +} + +// String implements stringer interface for partitionMap +func (pm partitionMap) String() string { + res := bytes.Buffer{} + for ns, replicaArray := range pm { + for i, nodeArray := range replicaArray { + for j, node := range nodeArray { + res.WriteString(ns) + res.WriteString(",") + res.WriteString(strconv.Itoa(i)) + res.WriteString(",") + res.WriteString(strconv.Itoa(j)) + res.WriteString(",") + if node != nil { + res.WriteString(node.String()) + } else { + res.WriteString("NIL") + } + res.WriteString("\n") + } + } + } + return res.String() +} + +// Cluster encapsulates the aerospike cluster nodes and manages +// them. +type Cluster struct { + // Initial host nodes specified by user. + seeds *SyncVal //[]*Host + + // All aliases for all nodes in cluster. + // Only accessed within cluster tend thread. + aliases *SyncVal //map[Host]*Node + + // Map of active nodes in cluster. + // Only accessed within cluster tend thread. 
+ nodesMap *SyncVal //map[string]*Node + + // Active nodes in cluster. + nodes *SyncVal //[]*Node + + // Hints for best node for a partition + partitionWriteMap atomic.Value //partitionMap + partitionUpdateMutex sync.Mutex + + clientPolicy ClientPolicy + + nodeIndex uint64 // only used via atomic operations + replicaIndex uint64 // only used via atomic operations + + wgTend sync.WaitGroup + tendChannel chan struct{} + closed AtomicBool + + // Aerospike v3.6.0+ + supportsFloat, supportsBatchIndex, supportsReplicasAll, supportsGeo *AtomicBool + requestProleReplicas *AtomicBool + + // User name in UTF-8 encoded bytes. + user string + + // Password in hashed format in bytes. + password *SyncVal // []byte +} + +// NewCluster generates a Cluster instance. +func NewCluster(policy *ClientPolicy, hosts []*Host) (*Cluster, error) { + // Default TLS names when TLS enabled. + newHosts := make([]*Host, 0, len(hosts)) + if policy.TlsConfig != nil && !policy.TlsConfig.InsecureSkipVerify { + useClusterName := len(policy.ClusterName) > 0 + + for _, host := range hosts { + nh := *host + if nh.TLSName == "" { + if useClusterName { + nh.TLSName = policy.ClusterName + } else { + nh.TLSName = host.Name + } + } + newHosts = append(newHosts, &nh) + } + hosts = newHosts + } + + newCluster := &Cluster{ + clientPolicy: *policy, + tendChannel: make(chan struct{}), + + seeds: NewSyncVal(hosts), + aliases: NewSyncVal(make(map[Host]*Node)), + nodesMap: NewSyncVal(make(map[string]*Node)), + nodes: NewSyncVal([]*Node{}), + + password: NewSyncVal(nil), + + supportsFloat: NewAtomicBool(false), + supportsBatchIndex: NewAtomicBool(false), + supportsReplicasAll: NewAtomicBool(false), + supportsGeo: NewAtomicBool(false), + requestProleReplicas: NewAtomicBool(policy.RequestProleReplicas), + } + + newCluster.partitionWriteMap.Store(make(partitionMap)) + + // setup auth info for cluster + if policy.RequiresAuthentication() { + newCluster.user = policy.User + hashedPass, err := hashPassword(policy.Password) 
+ if err != nil { + return nil, err + } + newCluster.password = NewSyncVal(hashedPass) + } + + // try to seed connections for first use + err := newCluster.waitTillStabilized() + + // apply policy rules + if policy.FailIfNotConnected && !newCluster.IsConnected() { + if err != nil { + return nil, err + } + return nil, fmt.Errorf("Failed to connect to host(s): %v. The network connection(s) to cluster nodes may have timed out, or the cluster may be in a state of flux.", hosts) + } + + // start up cluster maintenance go routine + newCluster.wgTend.Add(1) + go newCluster.clusterBoss(&newCluster.clientPolicy) + + Logger.Debug("New cluster initialized and ready to be used...") + return newCluster, err +} + +// String implements the stringer interface +func (clstr *Cluster) String() string { + return fmt.Sprintf("%v", clstr.nodes) +} + +// Maintains the cluster on intervals. +// All clean up code for cluster is here as well. +func (clstr *Cluster) clusterBoss(policy *ClientPolicy) { + defer clstr.wgTend.Done() + + tendInterval := policy.TendInterval + if tendInterval <= 10*time.Millisecond { + tendInterval = 10 * time.Millisecond + } + +Loop: + for { + select { + case <-clstr.tendChannel: + // tend channel closed + Logger.Debug("Tend channel closed. Shutting down the cluster...") + break Loop + case <-time.After(tendInterval): + tm := time.Now() + if err := clstr.tend(); err != nil { + Logger.Warn(err.Error()) + } + + // Tending took longer than requested tend interval. + // Tending is too slow for the cluster, and may be falling behind scheule. + if tendDuration := time.Since(tm); tendDuration > clstr.clientPolicy.TendInterval { + Logger.Warn("Tending took %s, while your requested ClientPolicy.TendInterval is %s. 
Tends are slower than the interval, and may be falling behind the changes in the cluster.", tendDuration, clstr.clientPolicy.TendInterval) + } + } + } + + // cleanup code goes here + clstr.closed.Set(true) + + // close the nodes + nodeArray := clstr.GetNodes() + for _, node := range nodeArray { + node.Close() + } +} + +// AddSeeds adds new hosts to the cluster. +// They will be added to the cluster on next tend call. +func (clstr *Cluster) AddSeeds(hosts []*Host) { + clstr.seeds.Update(func(val interface{}) (interface{}, error) { + seeds := val.([]*Host) + seeds = append(seeds, hosts...) + return seeds, nil + }) +} + +// Updates cluster state +func (clstr *Cluster) tend() error { + + nodes := clstr.GetNodes() + nodeCountBeforeTend := len(nodes) + + // All node additions/deletions are performed in tend goroutine. + // If active nodes don't exist, seed cluster. + if len(nodes) == 0 { + Logger.Info("No connections available; seeding...") + if newNodesFound, err := clstr.seedNodes(); !newNodesFound { + return err + } + + // refresh nodes list after seeding + nodes = clstr.GetNodes() + } + + peers := newPeers(len(nodes)+16, 16) + + floatSupport := true + batchIndexSupport := true + replicasAllSupport := true + geoSupport := true + + for _, node := range nodes { + // Clear node reference counts. + node.referenceCount.Set(0) + node.partitionChanged.Set(false) + if !node.supportsPeers.Get() { + peers.usePeers.Set(false) + } + } + + wg := sync.WaitGroup{} + wg.Add(len(nodes)) + for _, node := range nodes { + go func(node *Node) { + defer wg.Done() + if err := node.Refresh(peers); err != nil { + Logger.Debug("Error occured while refreshing node: %s", node.String()) + } + }(node) + } + wg.Wait() + + // Refresh peers when necessary. + if peers.usePeers.Get() && (peers.genChanged.Get() || len(peers.peers()) != nodeCountBeforeTend) { + // Refresh peers for all nodes that responded the first time even if only one node's peers changed. 
+ peers.refreshCount.Set(0) + + wg.Add(len(nodes)) + for _, node := range nodes { + go func(node *Node) { + defer wg.Done() + node.refreshPeers(peers) + }(node) + } + wg.Wait() + } + + // find the first host that connects + for _, _peer := range peers.peers() { + if clstr.peerExists(peers, _peer.nodeName) { + // Node already exists. Do not even try to connect to hosts. + continue + } + + wg.Add(1) + go func(__peer *peer) { + defer wg.Done() + for _, host := range __peer.hosts { + // attempt connection to the host + nv := nodeValidator{} + if err := nv.validateNode(clstr, host); err != nil { + Logger.Warn("Add node `%s` failed: `%s`", host, err) + continue + } + + // Must look for new node name in the unlikely event that node names do not agree. + if __peer.nodeName != nv.name { + Logger.Warn("Peer node `%s` is different than actual node `%s` for host `%s`", __peer.nodeName, nv.name, host) + } + + if clstr.peerExists(peers, nv.name) { + // Node already exists. Do not even try to connect to hosts. + break + } + + // Create new node. + node := clstr.createNode(&nv) + peers.addNode(nv.name, node) + node.refreshPartitions(peers) + break + } + }(_peer) + } + + // Refresh partition map when necessary. + wg.Add(len(nodes)) + for _, node := range nodes { + go func(node *Node) { + defer wg.Done() + if node.partitionChanged.Get() { + node.refreshPartitions(peers) + } + }(node) + } + + // This waits for the both steps above + wg.Wait() + + if peers.genChanged.Get() || !peers.usePeers.Get() { + // Handle nodes changes determined from refreshes. + removeList := clstr.findNodesToRemove(peers.refreshCount.Get()) + + // Remove nodes in a batch. + if len(removeList) > 0 { + for _, n := range removeList { + Logger.Debug("The following nodes will be removed: %s", n) + } + clstr.removeNodes(removeList) + } + } + + // Add nodes in a batch. + if len(peers.nodes()) > 0 { + clstr.addNodes(peers.nodes()) + } + + if !floatSupport { + Logger.Warn("Some cluster nodes do not support float type. 
Disabling native float support in the client library...") + } + + // Disable prole requests if some nodes don't support it. + if clstr.clientPolicy.RequestProleReplicas && !replicasAllSupport { + Logger.Warn("Some nodes don't support 'replicas-all'. Will use 'replicas-master' for all nodes.") + } + + // set the cluster supported features + clstr.supportsFloat.Set(floatSupport) + clstr.supportsBatchIndex.Set(batchIndexSupport) + clstr.supportsReplicasAll.Set(replicasAllSupport) + clstr.requestProleReplicas.Set(clstr.clientPolicy.RequestProleReplicas && replicasAllSupport) + clstr.supportsGeo.Set(geoSupport) + + // update all partitions in one go + var partitionMap partitionMap + for _, node := range clstr.GetNodes() { + if node.partitionChanged.Get() { + if partitionMap == nil { + partitionMap = clstr.getPartitions().clone() + } + + partitionMap.merge(node.partitionMap) + } + } + + if partitionMap != nil { + clstr.setPartitions(partitionMap) + } + + // only log if node count is changed + if nodeCountBeforeTend != len(clstr.GetNodes()) { + Logger.Info("Tend finished. Live node count changes from %d to %d", nodeCountBeforeTend, len(clstr.GetNodes())) + } + return nil +} + +func (clstr *Cluster) peerExists(peers *peers, nodeName string) bool { + node := clstr.findNodeByName(nodeName) + if node != nil { + node.referenceCount.IncrementAndGet() + return true + } + + node = peers.nodeByName(nodeName) + if node != nil { + node.referenceCount.IncrementAndGet() + return true + } + + return false +} + +// Tend the cluster until it has stabilized and return control. +// This helps avoid initial database request timeout issues when +// a large number of threads are initiated at client startup. +// +// If the cluster has not stabilized by the timeout, return +// control as well. Do not return an error since future +// database requests may still succeed. 
+func (clstr *Cluster) waitTillStabilized() error { + count := -1 + + doneCh := make(chan error, 10) + + // will run until the cluster is stabilized + go func() { + var err error + for { + if err = clstr.tend(); err != nil { + if aerr, ok := err.(AerospikeError); ok { + switch aerr.ResultCode() { + case NOT_AUTHENTICATED, CLUSTER_NAME_MISMATCH_ERROR: + doneCh <- err + return + } + } + Logger.Warn(err.Error()) + } + + // Check to see if cluster has changed since the last Tend(). + // If not, assume cluster has stabilized and return. + if count == len(clstr.GetNodes()) { + break + } + + time.Sleep(time.Millisecond) + + count = len(clstr.GetNodes()) + } + doneCh <- err + }() + + select { + case <-time.After(clstr.clientPolicy.Timeout): + return errors.New("Connecting to the cluster timed out.") + case err := <-doneCh: + return err + } +} + +func (clstr *Cluster) findAlias(alias *Host) *Node { + res, _ := clstr.aliases.GetSyncedVia(func(val interface{}) (interface{}, error) { + aliases := val.(map[Host]*Node) + return aliases[*alias], nil + }) + + return res.(*Node) +} + +func (clstr *Cluster) setPartitions(partMap partitionMap) { + clstr.partitionWriteMap.Store(partMap) +} + +func (clstr *Cluster) getPartitions() partitionMap { + return clstr.partitionWriteMap.Load().(partitionMap) +} + +// Adds seeds to the cluster +func (clstr *Cluster) seedNodes() (bool, error) { + // Must copy array reference for copy on write semantics to work. + seedArrayIfc, _ := clstr.seeds.GetSyncedVia(func(val interface{}) (interface{}, error) { + seeds := val.([]*Host) + seeds_copy := make([]*Host, len(seeds)) + copy(seeds_copy, seeds) + + return seeds_copy, nil + }) + seedArray := seedArrayIfc.([]*Host) + + successChan := make(chan struct{}, len(seedArray)) + errChan := make(chan error, len(seedArray)) + + Logger.Info("Seeding the cluster. Seeds count: %d", len(seedArray)) + + // Add all nodes at once to avoid copying entire array multiple times. 
+ var wg sync.WaitGroup + wg.Add(len(seedArray)) + for i, seed := range seedArray { + go func(index int, seed *Host) { + defer wg.Done() + + nodesToAdd := &nodesToAddT{nodesToAdd: map[string]*Node{}} + nv := nodeValidator{} + err := nv.seedNodes(clstr, seed, nodesToAdd) + if err != nil { + Logger.Warn("Seed %s failed: %s", seed.String(), err.Error()) + errChan <- err + return + } + clstr.addNodes(nodesToAdd.nodesToAdd) + successChan <- struct{}{} + }(i, seed) + } + + errorList := make([]error, 0, len(seedArray)) + seedCount := len(seedArray) +L: + for { + select { + case err := <-errChan: + errorList = append(errorList, err) + seedCount-- + if seedCount <= 0 { + break L + } + case <-successChan: + // even one seed is enough + return true, nil + case <-time.After(clstr.clientPolicy.Timeout): + // time is up, no seeds found + wg.Wait() + break L + } + } + + var errStrs []string + for _, err := range errorList { + if err != nil { + if aerr, ok := err.(AerospikeError); ok { + switch aerr.ResultCode() { + case NOT_AUTHENTICATED: + return false, NewAerospikeError(NOT_AUTHENTICATED) + case CLUSTER_NAME_MISMATCH_ERROR: + return false, aerr + } + } + errStrs = append(errStrs, err.Error()) + } + } + + return false, NewAerospikeError(INVALID_NODE_ERROR, "Failed to connect to hosts:"+strings.Join(errStrs, "\n")) +} + +func (clstr *Cluster) createNode(nv *nodeValidator) *Node { + return newNode(clstr, nv) +} + +// Finds a node by name in a list of nodes +func (clstr *Cluster) findNodeName(list []*Node, name string) bool { + for _, node := range list { + if node.GetName() == name { + return true + } + } + return false +} + +func (clstr *Cluster) addAlias(host *Host, node *Node) { + if host != nil && node != nil { + clstr.aliases.Update(func(val interface{}) (interface{}, error) { + aliases := val.(map[Host]*Node) + aliases[*host] = node + return aliases, nil + }) + } +} + +func (clstr *Cluster) removeAlias(alias *Host) { + if alias != nil { + clstr.aliases.Update(func(val 
interface{}) (interface{}, error) { + aliases := val.(map[Host]*Node) + delete(aliases, *alias) + return aliases, nil + }) + } +} + +func (clstr *Cluster) findNodesToRemove(refreshCount int) []*Node { + nodes := clstr.GetNodes() + + removeList := []*Node{} + + for _, node := range nodes { + if !node.IsActive() { + // Inactive nodes must be removed. + removeList = append(removeList, node) + continue + } + + switch len(nodes) { + case 1: + // Single node clusters rely on whether it responded to info requests. + if node.failures.Get() >= 5 { + // Remove node. Seeds will be tried in next cluster tend iteration. + removeList = append(removeList, node) + } + + case 2: + // Two node clusters require at least one successful refresh before removing. + if refreshCount == 1 && node.referenceCount.Get() == 0 && node.failures.Get() > 0 { + // Node is not referenced nor did it respond. + removeList = append(removeList, node) + } + + default: + // Multi-node clusters require at least one successful refresh before removing + // or alternatively, if connection to the whle cluster has been cut. + if (refreshCount >= 1 && node.referenceCount.Get() == 0) || (refreshCount == 0 && node.failures.Get() > 5) { + // Node is not referenced by other nodes. + // Check if node responded to info request. + if node.failures.Get() == 0 { + // Node is alive, but not referenced by other nodes. Check if mapped. + if !clstr.findNodeInPartitionMap(node) { + // Node doesn't have any partitions mapped to it. + // There is no point in keeping it in the cluster. + removeList = append(removeList, node) + } + } else { + // Node not responding. Remove it. + removeList = append(removeList, node) + } + } + } + } + return removeList +} + +func (clstr *Cluster) findNodeInPartitionMap(filter *Node) bool { + partitions := clstr.getPartitions() + + for _, replicaArray := range partitions { + for _, nodeArray := range replicaArray { + for _, node := range nodeArray { + // Use reference equality for performance. 
+ if node == filter { + return true + } + } + } + } + return false +} + +func (clstr *Cluster) addNodes(nodesToAdd map[string]*Node) { + clstr.nodes.Update(func(val interface{}) (interface{}, error) { + nodes := val.([]*Node) + for _, node := range nodesToAdd { + if node != nil && !clstr.findNodeName(nodes, node.name) { + Logger.Debug("Adding node %s (%s) to the cluster.", node.name, node.host.String()) + nodes = append(nodes, node) + } + } + + nodesMap := make(map[string]*Node, len(nodes)) + nodesAliases := make(map[Host]*Node, len(nodes)) + for i := range nodes { + nodesMap[nodes[i].name] = nodes[i] + + for _, alias := range nodes[i].GetAliases() { + nodesAliases[*alias] = nodes[i] + } + } + + clstr.nodesMap.Set(nodesMap) + clstr.aliases.Set(nodesAliases) + + return nodes, nil + }) +} + +func (clstr *Cluster) removeNodes(nodesToRemove []*Node) { + + // There is no need to delete nodes from partitionWriteMap because the nodes + // have already been set to inactive. + + // Cleanup node resources. + for _, node := range nodesToRemove { + // Remove node's aliases from cluster alias set. + // Aliases are only used in tend goroutine, so synchronization is not necessary. + clstr.aliases.Update(func(val interface{}) (interface{}, error) { + aliases := val.(map[Host]*Node) + for _, alias := range node.GetAliases() { + delete(aliases, *alias) + } + return aliases, nil + }) + + clstr.nodesMap.Update(func(val interface{}) (interface{}, error) { + nodesMap := val.(map[string]*Node) + delete(nodesMap, node.name) + return nodesMap, nil + }) + + node.Close() + } + + // Remove all nodes at once to avoid copying entire array multiple times. + clstr.nodes.Update(func(val interface{}) (interface{}, error) { + nodes := val.([]*Node) + nlist := make([]*Node, 0, len(nodes)) + nlist = append(nlist, nodes...) 
+ for i, n := range nlist { + for _, ntr := range nodesToRemove { + if ntr.Equals(n) { + nlist[i] = nil + } + } + } + + newNodes := make([]*Node, 0, len(nlist)) + for i := range nlist { + if nlist[i] != nil { + newNodes = append(newNodes, nlist[i]) + } + } + + return newNodes, nil + }) + +} + +// IsConnected returns true if cluster has nodes and is not already closed. +func (clstr *Cluster) IsConnected() bool { + // Must copy array reference for copy on write semantics to work. + nodeArray := clstr.GetNodes() + return (len(nodeArray) > 0) && !clstr.closed.Get() +} + +func (clstr *Cluster) getReadNode(partition *Partition, replica ReplicaPolicy) (*Node, error) { + switch replica { + case MASTER: + return clstr.getMasterNode(partition) + case MASTER_PROLES: + return clstr.getMasterProleNode(partition) + default: + // includes case RANDOM: + return clstr.GetRandomNode() + } +} + +func (clstr *Cluster) getMasterNode(partition *Partition) (*Node, error) { + pmap := clstr.getPartitions() + replicaArray := pmap[partition.Namespace] + + if replicaArray != nil { + node := replicaArray[0][partition.PartitionId] + if node != nil && node.IsActive() { + return node, nil + } + } + + return clstr.GetRandomNode() +} + +func (clstr *Cluster) getMasterProleNode(partition *Partition) (*Node, error) { + pmap := clstr.getPartitions() + replicaArray := pmap[partition.Namespace] + + if replicaArray != nil { + for range replicaArray { + index := int(atomic.AddUint64(&clstr.replicaIndex, 1) % uint64(len(replicaArray))) + node := replicaArray[index][partition.PartitionId] + if node != nil && node.IsActive() { + return node, nil + } + } + } + + return clstr.GetRandomNode() +} + +// GetRandomNode returns a random node on the cluster +func (clstr *Cluster) GetRandomNode() (*Node, error) { + // Must copy array reference for copy on write semantics to work. 
+ nodeArray := clstr.GetNodes() + length := len(nodeArray) + for i := 0; i < length; i++ { + // Must handle concurrency with other non-tending goroutines, so nodeIndex is consistent. + index := int(atomic.AddUint64(&clstr.nodeIndex, 1) % uint64(length)) + node := nodeArray[index] + + if node != nil && node.IsActive() { + // Logger.Debug("Node `%s` is active. index=%d", node, index) + return node, nil + } + } + return nil, NewAerospikeError(INVALID_NODE_ERROR) +} + +// GetNodes returns a list of all nodes in the cluster +func (clstr *Cluster) GetNodes() []*Node { + // Must copy array reference for copy on write semantics to work. + return clstr.nodes.Get().([]*Node) +} + +// GetSeeds returns a list of all seed nodes in the cluster +func (clstr *Cluster) GetSeeds() []Host { + res, _ := clstr.seeds.GetSyncedVia(func(val interface{}) (interface{}, error) { + seeds := val.([]*Host) + res := make([]Host, 0, len(seeds)) + for _, seed := range seeds { + res = append(res, *seed) + } + + return res, nil + }) + + return res.([]Host) +} + +// GetAliases returns a list of all node aliases in the cluster +func (clstr *Cluster) GetAliases() map[Host]*Node { + res, _ := clstr.aliases.GetSyncedVia(func(val interface{}) (interface{}, error) { + aliases := val.(map[Host]*Node) + res := make(map[Host]*Node, len(aliases)) + for h, n := range aliases { + res[h] = n + } + + return res, nil + }) + + return res.(map[Host]*Node) +} + +// GetNodeByName finds a node by name and returns an +// error if the node is not found. +func (clstr *Cluster) GetNodeByName(nodeName string) (*Node, error) { + node := clstr.findNodeByName(nodeName) + + if node == nil { + return nil, NewAerospikeError(INVALID_NODE_ERROR) + } + return node, nil +} + +func (clstr *Cluster) findNodeByName(nodeName string) *Node { + // Must copy array reference for copy on write semantics to work. 
+ for _, node := range clstr.GetNodes() { + if node.GetName() == nodeName { + return node + } + } + return nil +} + +// Close closes all cached connections to the cluster nodes +// and stops the tend goroutine. +func (clstr *Cluster) Close() { + if !clstr.closed.Get() { + // send close signal to maintenance channel + close(clstr.tendChannel) + + // wait until tend is over + clstr.wgTend.Wait() + } +} + +// MigrationInProgress determines if any node in the cluster +// is participating in a data migration +func (clstr *Cluster) MigrationInProgress(timeout time.Duration) (res bool, err error) { + if timeout <= 0 { + timeout = _DEFAULT_TIMEOUT + } + + done := make(chan bool, 1) + + go func() { + // this function is guaranteed to return after _DEFAULT_TIMEOUT + nodes := clstr.GetNodes() + for _, node := range nodes { + if node.IsActive() { + if res, err = node.MigrationInProgress(); res || err != nil { + done <- true + return + } + } + } + + res, err = false, nil + done <- false + }() + + dealine := time.After(timeout) + for { + select { + case <-dealine: + return false, NewAerospikeError(TIMEOUT) + case <-done: + return res, err + } + } +} + +// WaitUntillMigrationIsFinished will block until all +// migration operations in the cluster all finished. +func (clstr *Cluster) WaitUntillMigrationIsFinished(timeout time.Duration) (err error) { + if timeout <= 0 { + timeout = _NO_TIMEOUT + } + done := make(chan error, 1) + + go func() { + // this function is guaranteed to return after timeout + // no go routines will be leaked + for { + if res, err := clstr.MigrationInProgress(timeout); err != nil || !res { + done <- err + return + } + } + }() + + dealine := time.After(timeout) + select { + case <-dealine: + return NewAerospikeError(TIMEOUT) + case err = <-done: + return err + } +} + +// Password returns the password that is currently used with the cluster. 
+func (clstr *Cluster) Password() (res []byte) { + pass := clstr.password.Get() + if pass != nil { + return pass.([]byte) + } + return nil +} + +func (clstr *Cluster) changePassword(user string, password string, hash []byte) { + // change password ONLY if the user is the same + if clstr.user == user { + clstr.clientPolicy.Password = password + clstr.password.Set(hash) + } +} + +// ClientPolicy returns the client policy that is currently used with the cluster. +func (clstr *Cluster) ClientPolicy() (res ClientPolicy) { + return clstr.clientPolicy +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/command.go b/vendor/github.com/aerospike/aerospike-client-go/command.go new file mode 100644 index 00000000000..5a118f28186 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/command.go @@ -0,0 +1,1385 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" + + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" + + ParticleType "github.com/aerospike/aerospike-client-go/types/particle_type" + // Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +const ( + // Flags commented out are not supported by cmd client. + // Contains a read operation. + _INFO1_READ int = (1 << 0) + // Get all bins. 
+ _INFO1_GET_ALL int = (1 << 1) + + // Do not read the bins + _INFO1_NOBINDATA int = (1 << 5) + + // Involve all replicas in read operation. + _INFO1_CONSISTENCY_ALL = (1 << 6) + + // Create or update record + _INFO2_WRITE int = (1 << 0) + // Fling a record into the belly of Moloch. + _INFO2_DELETE int = (1 << 1) + // Update if expected generation == old. + _INFO2_GENERATION int = (1 << 2) + // Update if new generation >= old, good for restore. + _INFO2_GENERATION_GT int = (1 << 3) + // Transaction resulting in record deletion leaves tombstone (Enterprise only). + _INFO2_DURABLE_DELETE int = (1 << 4) + // Create only. Fail if record already exists. + _INFO2_CREATE_ONLY int = (1 << 5) + + // Return a result for every operation. + _INFO2_RESPOND_ALL_OPS int = (1 << 7) + + // This is the last of a multi-part message. + _INFO3_LAST int = (1 << 0) + // Commit to master only before declaring success. + _INFO3_COMMIT_MASTER int = (1 << 1) + // Update only. Merge bins. + _INFO3_UPDATE_ONLY int = (1 << 3) + + // Create or completely replace record. + _INFO3_CREATE_OR_REPLACE int = (1 << 4) + // Completely replace existing record only. 
+ _INFO3_REPLACE_ONLY int = (1 << 5) + + _MSG_TOTAL_HEADER_SIZE uint8 = 30 + _FIELD_HEADER_SIZE uint8 = 5 + _OPERATION_HEADER_SIZE uint8 = 8 + _MSG_REMAINING_HEADER_SIZE uint8 = 22 + _DIGEST_SIZE uint8 = 20 + _CL_MSG_VERSION int64 = 2 + _AS_MSG_TYPE int64 = 3 +) + +// command interface describes all commands available +type command interface { + getPolicy(ifc command) Policy + + writeBuffer(ifc command) error + getNode(ifc command) (*Node, error) + getConnection(timeout time.Duration) (*Connection, error) + putConnection(conn *Connection) + parseResult(ifc command, conn *Connection) error + parseRecordResults(ifc command, receiveSize int) (bool, error) + + execute(ifc command) error + // Executes the command + Execute() error +} + +// Holds data buffer for the command +type baseCommand struct { + node *Node + conn *Connection + + dataBuffer []byte + dataOffset int + + // oneShot determines if streaming commands like query, scan or queryAggregate + // are not retried if they error out mid-parsing + oneShot bool +} + +// Writes the command for write operations +func (cmd *baseCommand) setWrite(policy *WritePolicy, operation OperationType, key *Key, bins []*Bin, binMap BinMap) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, policy.SendKey) + if err != nil { + return err + } + + if binMap == nil { + for i := range bins { + if err := cmd.estimateOperationSizeForBin(bins[i]); err != nil { + return err + } + } + } else { + for name, value := range binMap { + if err := cmd.estimateOperationSizeForBinNameAndValue(name, value); err != nil { + return err + } + } + } + + if err := cmd.sizeBuffer(); err != nil { + return err + } + + if binMap == nil { + cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, len(bins)) + } else { + cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, len(binMap)) + } + + cmd.writeKey(key, policy.SendKey) + + if binMap == nil { + for i := range bins { + if err := cmd.writeOperationForBin(bins[i], operation); 
err != nil { + return err + } + } + } else { + for name, value := range binMap { + if err := cmd.writeOperationForBinNameAndValue(name, value, operation); err != nil { + return err + } + } + } + + cmd.end() + + return nil +} + +// Writes the command for delete operations +func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, false) + if err != nil { + return err + } + if err := cmd.sizeBuffer(); err != nil { + return nil + } + cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE|_INFO2_DELETE, fieldCount, 0) + cmd.writeKey(key, false) + cmd.end() + return nil + +} + +// Writes the command for touch operations +func (cmd *baseCommand) setTouch(policy *WritePolicy, key *Key) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, policy.SendKey) + if err != nil { + return err + } + + cmd.estimateOperationSize() + if err := cmd.sizeBuffer(); err != nil { + return nil + } + cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, 1) + cmd.writeKey(key, policy.SendKey) + cmd.writeOperationForOperationType(TOUCH) + cmd.end() + return nil + +} + +// Writes the command for exist operations +func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, false) + if err != nil { + return err + } + if err := cmd.sizeBuffer(); err != nil { + return nil + } + cmd.writeHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, 0, fieldCount, 0) + cmd.writeKey(key, false) + cmd.end() + return nil + +} + +// Writes the command for get operations (all bins) +func (cmd *baseCommand) setReadForKeyOnly(policy *BasePolicy, key *Key) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, false) + if err != nil { + return err + } + if err := cmd.sizeBuffer(); err != nil { + return nil + } + cmd.writeHeader(policy, _INFO1_READ|_INFO1_GET_ALL, 0, fieldCount, 0) + cmd.writeKey(key, false) + cmd.end() + return nil + +} + +// Writes 
the command for get operations (specified bins) +func (cmd *baseCommand) setRead(policy *BasePolicy, key *Key, binNames []string) (err error) { + if len(binNames) > 0 { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, false) + if err != nil { + return err + } + + for i := range binNames { + cmd.estimateOperationSizeForBinName(binNames[i]) + } + if err = cmd.sizeBuffer(); err != nil { + return nil + } + cmd.writeHeader(policy, _INFO1_READ, 0, fieldCount, len(binNames)) + cmd.writeKey(key, false) + + for i := range binNames { + cmd.writeOperationForBinName(binNames[i], READ) + } + cmd.end() + } else { + err = cmd.setReadForKeyOnly(policy, key) + } + + return err +} + +// Writes the command for getting metadata operations +func (cmd *baseCommand) setReadHeader(policy *BasePolicy, key *Key) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, false) + if err != nil { + return err + } + cmd.estimateOperationSizeForBinName("") + if err := cmd.sizeBuffer(); err != nil { + return nil + } + + cmd.writeHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, 0, fieldCount, 1) + + cmd.writeKey(key, false) + cmd.writeOperationForBinName("", READ) + cmd.end() + return nil + +} + +// Implements different command operations +func (cmd *baseCommand) setOperate(policy *WritePolicy, key *Key, operations []*Operation) error { + if len(operations) == 0 { + return NewAerospikeError(PARAMETER_ERROR, "No operations were passed.") + } + + cmd.begin() + fieldCount := 0 + readAttr := 0 + writeAttr := 0 + readBin := false + readHeader := false + RespondPerEachOp := policy.RespondPerEachOp + + for i := range operations { + switch operations[i].opType { + case MAP_READ: + // Map operations require RespondPerEachOp to be true. + RespondPerEachOp = true + // Fall through to read. + fallthrough + case READ, CDT_READ: + if !operations[i].headerOnly { + readAttr |= _INFO1_READ + + // Read all bins if no bin is specified. 
+ if operations[i].binName == "" { + readAttr |= _INFO1_GET_ALL + } + readBin = true + } else { + readAttr |= _INFO1_READ + readHeader = true + } + case MAP_MODIFY: + // Map operations require RespondPerEachOp to be true. + RespondPerEachOp = true + // Fall through to default. + fallthrough + default: + writeAttr = _INFO2_WRITE + } + cmd.estimateOperationSizeForOperation(operations[i]) + } + + ksz, err := cmd.estimateKeySize(key, policy.SendKey && writeAttr != 0) + if err != nil { + return err + } + fieldCount += ksz + + if err := cmd.sizeBuffer(); err != nil { + return err + } + + if readHeader && !readBin { + readAttr |= _INFO1_NOBINDATA + } + + if RespondPerEachOp { + writeAttr |= _INFO2_RESPOND_ALL_OPS + } + + if writeAttr != 0 { + cmd.writeHeaderWithPolicy(policy, readAttr, writeAttr, fieldCount, len(operations)) + } else { + cmd.writeHeader(&policy.BasePolicy, readAttr, writeAttr, fieldCount, len(operations)) + } + cmd.writeKey(key, policy.SendKey && writeAttr != 0) + + for _, operation := range operations { + if err := cmd.writeOperationForOperation(operation); err != nil { + return err + } + } + + cmd.end() + + return nil +} + +func (cmd *baseCommand) setUdf(policy *WritePolicy, key *Key, packageName string, functionName string, args *ValueArray) error { + cmd.begin() + fieldCount, err := cmd.estimateKeySize(key, policy.SendKey) + if err != nil { + return err + } + + fc, err := cmd.estimateUdfSize(packageName, functionName, args) + if err != nil { + return err + } + fieldCount += fc + + if err := cmd.sizeBuffer(); err != nil { + return nil + } + + cmd.writeHeaderWithPolicy(policy, 0, _INFO2_WRITE, fieldCount, 0) + cmd.writeKey(key, policy.SendKey) + cmd.writeFieldString(packageName, UDF_PACKAGE_NAME) + cmd.writeFieldString(functionName, UDF_FUNCTION) + cmd.writeUdfArgs(args) + cmd.end() + + return nil +} + +func (cmd *baseCommand) setBatchExists(policy *BasePolicy, keys []*Key, batch *batchNamespace) error { + // Estimate buffer size + cmd.begin() + 
byteSize := batch.offsetSize * int(_DIGEST_SIZE) + + cmd.dataOffset += len(*batch.namespace) + + int(_FIELD_HEADER_SIZE) + byteSize + int(_FIELD_HEADER_SIZE) + if err := cmd.sizeBuffer(); err != nil { + return nil + } + + cmd.writeHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, 0, 2, 0) + cmd.writeFieldString(*batch.namespace, NAMESPACE) + cmd.writeFieldHeader(byteSize, DIGEST_RIPE_ARRAY) + + offsets := batch.offsets + max := batch.offsetSize + + for i := 0; i < max; i++ { + key := keys[offsets[i]] + copy(cmd.dataBuffer[cmd.dataOffset:], key.digest[:]) + cmd.dataOffset += len(key.digest) + } + cmd.end() + + return nil +} + +func (cmd *baseCommand) setBatchGet(policy *BasePolicy, keys []*Key, batch *batchNamespace, binNames map[string]struct{}, readAttr int) error { + // Estimate buffer size + cmd.begin() + byteSize := batch.offsetSize * int(_DIGEST_SIZE) + + cmd.dataOffset += len(*batch.namespace) + + int(_FIELD_HEADER_SIZE) + byteSize + int(_FIELD_HEADER_SIZE) + + for binName := range binNames { + cmd.estimateOperationSizeForBinName(binName) + } + + if err := cmd.sizeBuffer(); err != nil { + return nil + } + + operationCount := len(binNames) + cmd.writeHeader(policy, readAttr, 0, 2, operationCount) + cmd.writeFieldString(*batch.namespace, NAMESPACE) + cmd.writeFieldHeader(byteSize, DIGEST_RIPE_ARRAY) + + offsets := batch.offsets + max := batch.offsetSize + + for i := 0; i < max; i++ { + key := keys[offsets[i]] + copy(cmd.dataBuffer[cmd.dataOffset:], key.digest[:]) + cmd.dataOffset += len(key.digest) + } + + for binName := range binNames { + cmd.writeOperationForBinName(binName, READ) + } + cmd.end() + + return nil +} + +func (cmd *baseCommand) setScan(policy *ScanPolicy, namespace *string, setName *string, binNames []string, taskId uint64) error { + cmd.begin() + fieldCount := 0 + // predExpsSize := 0 + + if namespace != nil { + cmd.dataOffset += len(*namespace) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + if setName != nil { + cmd.dataOffset += len(*setName) + 
int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + // Estimate scan options size. + cmd.dataOffset += 2 + int(_FIELD_HEADER_SIZE) + fieldCount++ + + // Estimate scan timeout size. + cmd.dataOffset += 4 + int(_FIELD_HEADER_SIZE) + fieldCount++ + + // Allocate space for TaskId field. + cmd.dataOffset += 8 + int(_FIELD_HEADER_SIZE) + fieldCount++ + + if binNames != nil { + for i := range binNames { + cmd.estimateOperationSizeForBinName(binNames[i]) + } + } + + if err := cmd.sizeBuffer(); err != nil { + return nil + } + readAttr := _INFO1_READ + + if !policy.IncludeBinData { + readAttr |= _INFO1_NOBINDATA + } + + operationCount := 0 + if binNames != nil { + operationCount = len(binNames) + } + cmd.writeHeader(policy.BasePolicy, readAttr, 0, fieldCount, operationCount) + + if namespace != nil { + cmd.writeFieldString(*namespace, NAMESPACE) + } + + if setName != nil { + cmd.writeFieldString(*setName, TABLE) + } + + cmd.writeFieldHeader(2, SCAN_OPTIONS) + priority := byte(policy.Priority) + priority <<= 4 + + if policy.FailOnClusterChange { + priority |= 0x08 + } + + if policy.IncludeLDT { + priority |= 0x02 + } + + cmd.WriteByte(priority) + cmd.WriteByte(byte(policy.ScanPercent)) + + // Write scan timeout + cmd.writeFieldHeader(4, SCAN_TIMEOUT) + cmd.WriteInt32(int32(policy.ServerSocketTimeout / time.Millisecond)) // in milliseconds + + cmd.writeFieldHeader(8, TRAN_ID) + cmd.WriteUint64(taskId) + + if binNames != nil { + for i := range binNames { + cmd.writeOperationForBinName(binNames[i], READ) + } + } + + cmd.end() + + return nil +} + +func (cmd *baseCommand) setQuery(policy *QueryPolicy, statement *Statement, write bool) (err error) { + fieldCount := 0 + filterSize := 0 + binNameSize := 0 + predExpsSize := 0 + + cmd.begin() + + if statement.Namespace != "" { + cmd.dataOffset += len(statement.Namespace) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + if statement.IndexName != "" { + cmd.dataOffset += len(statement.IndexName) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + 
+ if statement.SetName != "" { + cmd.dataOffset += len(statement.SetName) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + // Allocate space for TaskId field. + cmd.dataOffset += 8 + int(_FIELD_HEADER_SIZE) + fieldCount++ + + if len(statement.Filters) > 0 { + if len(statement.Filters) > 1 { + return NewAerospikeError(PARAMETER_ERROR, "Aerospike server currently supports only one filter.") + } else if len(statement.Filters) == 1 { + idxType := statement.Filters[0].IndexCollectionType() + + if idxType != ICT_DEFAULT { + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + 1 + fieldCount++ + } + } + + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + filterSize++ // num filters + + for _, filter := range statement.Filters { + sz, err := filter.estimateSize() + if err != nil { + return err + } + filterSize += sz + } + cmd.dataOffset += filterSize + fieldCount++ + + // Query bin names are specified as a field (Scan bin names are specified later as operations) + if len(statement.BinNames) > 0 { + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + binNameSize++ // num bin names + + for _, binName := range statement.BinNames { + binNameSize += len(binName) + 1 + } + cmd.dataOffset += binNameSize + fieldCount++ + } + } else { + // Calling query with no filters is more efficiently handled by a primary index scan. + // Estimate scan options size. 
+ cmd.dataOffset += (2 + int(_FIELD_HEADER_SIZE)) + fieldCount++ + } + + if len(statement.predExps) > 0 { + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + for _, predexp := range statement.predExps { + predExpsSize += predexp.marshaledSize() + } + cmd.dataOffset += predExpsSize + fieldCount++ + } + + var functionArgs *ValueArray + if statement.functionName != "" { + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + 1 // udf type + cmd.dataOffset += len(statement.packageName) + int(_FIELD_HEADER_SIZE) + cmd.dataOffset += len(statement.functionName) + int(_FIELD_HEADER_SIZE) + + fasz := 0 + if len(statement.functionArgs) > 0 { + functionArgs = NewValueArray(statement.functionArgs) + fasz, err = functionArgs.estimateSize() + if err != nil { + return err + } + } + + cmd.dataOffset += int(_FIELD_HEADER_SIZE) + fasz + fieldCount += 4 + } + + if len(statement.Filters) == 0 { + if len(statement.BinNames) > 0 { + for _, binName := range statement.BinNames { + cmd.estimateOperationSizeForBinName(binName) + } + } + } + + if err := cmd.sizeBuffer(); err != nil { + return nil + } + + operationCount := 0 + if len(statement.Filters) == 0 && len(statement.BinNames) > 0 { + operationCount = len(statement.BinNames) + } + + if write { + cmd.writeHeader(policy.BasePolicy, _INFO1_READ, _INFO2_WRITE, fieldCount, operationCount) + } else { + cmd.writeHeader(policy.BasePolicy, _INFO1_READ, 0, fieldCount, operationCount) + } + + if statement.Namespace != "" { + cmd.writeFieldString(statement.Namespace, NAMESPACE) + } + + if statement.IndexName != "" { + cmd.writeFieldString(statement.IndexName, INDEX_NAME) + } + + if statement.SetName != "" { + cmd.writeFieldString(statement.SetName, TABLE) + } + + cmd.writeFieldHeader(8, TRAN_ID) + cmd.WriteUint64(statement.TaskId) + + if len(statement.Filters) > 0 { + if len(statement.Filters) >= 1 { + idxType := statement.Filters[0].IndexCollectionType() + + if idxType != ICT_DEFAULT { + cmd.writeFieldHeader(1, INDEX_TYPE) + cmd.WriteByte(byte(idxType)) + } + } 
+ + cmd.writeFieldHeader(filterSize, INDEX_RANGE) + cmd.WriteByte(byte(len(statement.Filters))) + + for _, filter := range statement.Filters { + _, err := filter.write(cmd) + if err != nil { + return err + } + } + + if len(statement.BinNames) > 0 { + cmd.writeFieldHeader(binNameSize, QUERY_BINLIST) + cmd.WriteByte(byte(len(statement.BinNames))) + + for _, binName := range statement.BinNames { + len := copy(cmd.dataBuffer[cmd.dataOffset+1:], binName) + cmd.dataBuffer[cmd.dataOffset] = byte(len) + cmd.dataOffset += len + 1 + } + } + } else { + // Calling query with no filters is more efficiently handled by a primary index scan. + cmd.writeFieldHeader(2, SCAN_OPTIONS) + priority := byte(policy.Priority) + priority <<= 4 + cmd.WriteByte(priority) + cmd.WriteByte(byte(100)) + } + + if len(statement.predExps) > 0 { + cmd.writeFieldHeader(predExpsSize, PREDEXP) + for _, predexp := range statement.predExps { + if err := predexp.marshal(cmd); err != nil { + return err + } + } + } + + if statement.functionName != "" { + cmd.writeFieldHeader(1, UDF_OP) + if statement.returnData { + cmd.dataBuffer[cmd.dataOffset] = byte(1) + } else { + cmd.dataBuffer[cmd.dataOffset] = byte(2) + } + cmd.dataOffset++ + + cmd.writeFieldString(statement.packageName, UDF_PACKAGE_NAME) + cmd.writeFieldString(statement.functionName, UDF_FUNCTION) + cmd.writeUdfArgs(functionArgs) + } + + // scan binNames come last + if len(statement.Filters) == 0 { + if len(statement.BinNames) > 0 { + for _, binName := range statement.BinNames { + cmd.writeOperationForBinName(binName, READ) + } + } + } + + cmd.end() + + return nil +} + +func (cmd *baseCommand) estimateKeySize(key *Key, sendKey bool) (int, error) { + fieldCount := 0 + + if key.namespace != "" { + cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + if key.setName != "" { + cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE) + fieldCount++ + } + + cmd.dataOffset += int(_DIGEST_SIZE + _FIELD_HEADER_SIZE) + 
fieldCount++ + + if sendKey { + // field header size + key size + sz, err := key.userKey.estimateSize() + if err != nil { + return sz, err + } + cmd.dataOffset += sz + int(_FIELD_HEADER_SIZE) + 1 + fieldCount++ + } + + return fieldCount, nil +} + +func (cmd *baseCommand) estimateUdfSize(packageName string, functionName string, args *ValueArray) (int, error) { + cmd.dataOffset += len(packageName) + int(_FIELD_HEADER_SIZE) + cmd.dataOffset += len(functionName) + int(_FIELD_HEADER_SIZE) + + sz, err := args.estimateSize() + if err != nil { + return 0, err + } + + // fmt.Println(args, sz) + + cmd.dataOffset += sz + int(_FIELD_HEADER_SIZE) + return 3, nil +} + +func (cmd *baseCommand) estimateOperationSizeForBin(bin *Bin) error { + cmd.dataOffset += len(bin.Name) + int(_OPERATION_HEADER_SIZE) + sz, err := bin.Value.estimateSize() + if err != nil { + return err + } + cmd.dataOffset += sz + return nil +} + +func (cmd *baseCommand) estimateOperationSizeForBinNameAndValue(name string, value interface{}) error { + cmd.dataOffset += len(name) + int(_OPERATION_HEADER_SIZE) + sz, err := NewValue(value).estimateSize() + if err != nil { + return err + } + cmd.dataOffset += sz + return nil +} + +func (cmd *baseCommand) estimateOperationSizeForOperation(operation *Operation) error { + binLen := len(operation.binName) + cmd.dataOffset += binLen + int(_OPERATION_HEADER_SIZE) + + if operation.encoder == nil { + if operation.binValue != nil { + sz, err := operation.binValue.estimateSize() + if err != nil { + return err + } + cmd.dataOffset += sz + } + } else { + sz, err := operation.encoder(operation, nil) + if err != nil { + return err + } + cmd.dataOffset += sz + } + return nil +} + +func (cmd *baseCommand) estimateOperationSizeForBinName(binName string) { + cmd.dataOffset += len(binName) + int(_OPERATION_HEADER_SIZE) +} + +func (cmd *baseCommand) estimateOperationSize() { + cmd.dataOffset += int(_OPERATION_HEADER_SIZE) +} + +// Generic header write. 
+func (cmd *baseCommand) writeHeader(policy *BasePolicy, readAttr int, writeAttr int, fieldCount int, operationCount int) { + + if policy.ConsistencyLevel == CONSISTENCY_ALL { + readAttr |= _INFO1_CONSISTENCY_ALL + } + + // Write all header data except total size which must be written last. + cmd.dataBuffer[8] = _MSG_REMAINING_HEADER_SIZE // Message header length. + cmd.dataBuffer[9] = byte(readAttr) + cmd.dataBuffer[10] = byte(writeAttr) + + for i := 11; i < 26; i++ { + cmd.dataBuffer[i] = 0 + } + cmd.dataOffset = 26 + cmd.WriteInt16(int16(fieldCount)) + cmd.WriteInt16(int16(operationCount)) + cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE) +} + +// Header write for write operations. +func (cmd *baseCommand) writeHeaderWithPolicy(policy *WritePolicy, readAttr int, writeAttr int, fieldCount int, operationCount int) { + // Set flags. + generation := uint32(0) + infoAttr := 0 + + switch policy.RecordExistsAction { + case UPDATE: + case UPDATE_ONLY: + infoAttr |= _INFO3_UPDATE_ONLY + case REPLACE: + infoAttr |= _INFO3_CREATE_OR_REPLACE + case REPLACE_ONLY: + infoAttr |= _INFO3_REPLACE_ONLY + case CREATE_ONLY: + writeAttr |= _INFO2_CREATE_ONLY + } + + switch policy.GenerationPolicy { + case NONE: + case EXPECT_GEN_EQUAL: + generation = policy.Generation + writeAttr |= _INFO2_GENERATION + case EXPECT_GEN_GT: + generation = policy.Generation + writeAttr |= _INFO2_GENERATION_GT + } + + if policy.CommitLevel == COMMIT_MASTER { + infoAttr |= _INFO3_COMMIT_MASTER + } + + if policy.ConsistencyLevel == CONSISTENCY_ALL { + readAttr |= _INFO1_CONSISTENCY_ALL + } + + if policy.DurableDelete { + writeAttr |= _INFO2_DURABLE_DELETE + } + + // Write all header data except total size which must be written last. + cmd.dataBuffer[8] = _MSG_REMAINING_HEADER_SIZE // Message header length. 
+ cmd.dataBuffer[9] = byte(readAttr) + cmd.dataBuffer[10] = byte(writeAttr) + cmd.dataBuffer[11] = byte(infoAttr) + cmd.dataBuffer[12] = 0 // unused + cmd.dataBuffer[13] = 0 // clear the result code + cmd.dataOffset = 14 + cmd.WriteUint32(generation) + cmd.dataOffset = 18 + cmd.WriteUint32(policy.Expiration) + + // Initialize timeout. It will be written later. + cmd.dataBuffer[22] = 0 + cmd.dataBuffer[23] = 0 + cmd.dataBuffer[24] = 0 + cmd.dataBuffer[25] = 0 + + cmd.dataOffset = 26 + cmd.WriteInt16(int16(fieldCount)) + cmd.WriteInt16(int16(operationCount)) + cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE) +} + +func (cmd *baseCommand) writeKey(key *Key, sendKey bool) { + // Write key into buffer. + if key.namespace != "" { + cmd.writeFieldString(key.namespace, NAMESPACE) + } + + if key.setName != "" { + cmd.writeFieldString(key.setName, TABLE) + } + + cmd.writeFieldBytes(key.digest[:], DIGEST_RIPE) + + if sendKey { + cmd.writeFieldValue(key.userKey, KEY) + } +} + +func (cmd *baseCommand) writeOperationForBin(bin *Bin, operation OperationType) error { + nameLength := copy(cmd.dataBuffer[(cmd.dataOffset+int(_OPERATION_HEADER_SIZE)):], bin.Name) + + // check for float support + cmd.checkServerCompatibility(bin.Value) + + valueLength, err := bin.Value.estimateSize() + if err != nil { + return err + } + + cmd.WriteInt32(int32(nameLength + valueLength + 4)) + cmd.WriteByte((operation.op)) + cmd.WriteByte((byte(bin.Value.GetType()))) + cmd.WriteByte((byte(0))) + cmd.WriteByte((byte(nameLength))) + cmd.dataOffset += nameLength + _, err = bin.Value.write(cmd) + return err +} + +func (cmd *baseCommand) writeOperationForBinNameAndValue(name string, val interface{}, operation OperationType) error { + nameLength := copy(cmd.dataBuffer[(cmd.dataOffset+int(_OPERATION_HEADER_SIZE)):], name) + + v := NewValue(val) + + // check for float support + cmd.checkServerCompatibility(v) + + valueLength, err := v.estimateSize() + if err != nil { + return err + } + + 
cmd.WriteInt32(int32(nameLength + valueLength + 4)) + cmd.WriteByte((operation.op)) + cmd.WriteByte((byte(v.GetType()))) + cmd.WriteByte((byte(0))) + cmd.WriteByte((byte(nameLength))) + cmd.dataOffset += nameLength + _, err = v.write(cmd) + return err +} + +func (cmd *baseCommand) writeOperationForOperation(operation *Operation) error { + nameLength := copy(cmd.dataBuffer[(cmd.dataOffset+int(_OPERATION_HEADER_SIZE)):], operation.binName) + + // check for float support + cmd.checkServerCompatibility(operation.binValue) + + if operation.used { + // cache will set the used flag to false again + operation.cache() + } + + if operation.encoder == nil { + valueLength, err := operation.binValue.estimateSize() + if err != nil { + return err + } + + cmd.WriteInt32(int32(nameLength + valueLength + 4)) + cmd.WriteByte((operation.opType.op)) + cmd.WriteByte((byte(operation.binValue.GetType()))) + cmd.WriteByte((byte(0))) + cmd.WriteByte((byte(nameLength))) + cmd.dataOffset += nameLength + _, err = operation.binValue.write(cmd) + return err + } else { + valueLength, err := operation.encoder(operation, nil) + if err != nil { + return err + } + + cmd.WriteInt32(int32(nameLength + valueLength + 4)) + cmd.WriteByte((operation.opType.op)) + cmd.WriteByte((byte(ParticleType.BLOB))) + cmd.WriteByte((byte(0))) + cmd.WriteByte((byte(nameLength))) + cmd.dataOffset += nameLength + _, err = operation.encoder(operation, cmd) + //mark the operation as used, so that it will be cached the next time it is used + operation.used = err == nil + return err + } +} + +func (cmd *baseCommand) writeOperationForBinName(name string, operation OperationType) { + nameLength := copy(cmd.dataBuffer[(cmd.dataOffset+int(_OPERATION_HEADER_SIZE)):], name) + cmd.WriteInt32(int32(nameLength + 4)) + cmd.WriteByte((operation.op)) + cmd.WriteByte(byte(0)) + cmd.WriteByte(byte(0)) + cmd.WriteByte(byte(nameLength)) + cmd.dataOffset += nameLength +} + +func (cmd *baseCommand) writeOperationForOperationType(operation 
OperationType) { + cmd.WriteInt32(int32(4)) + cmd.WriteByte(operation.op) + cmd.WriteByte(0) + cmd.WriteByte(0) + cmd.WriteByte(0) +} + +// TODO: Remove this method and move it to the appropriate VALUE method +func (cmd *baseCommand) checkServerCompatibility(val Value) { + if val == nil { + return + } + + // check for float support + switch val.GetType() { + case ParticleType.FLOAT: + if !cmd.node.supportsFloat.Get() { + panic("This cluster node doesn't support double precision floating-point values.") + } + case ParticleType.GEOJSON: + if !cmd.node.supportsGeo.Get() { + panic("This cluster node doesn't support geo-spatial features.") + } + } +} + +func (cmd *baseCommand) writeFieldValue(value Value, ftype FieldType) error { + // check for float support + cmd.checkServerCompatibility(value) + + vlen, err := value.estimateSize() + if err != nil { + return err + } + cmd.writeFieldHeader(vlen+1, ftype) + cmd.WriteByte(byte(value.GetType())) + + _, err = value.write(cmd) + return err +} + +func (cmd *baseCommand) writeUdfArgs(value *ValueArray) error { + if value != nil { + vlen, err := value.estimateSize() + if err != nil { + return err + } + cmd.writeFieldHeader(vlen, UDF_ARGLIST) + _, err = value.pack(cmd) + return err + } + + cmd.writeFieldHeader(0, UDF_ARGLIST) + return nil + +} + +func (cmd *baseCommand) writeFieldString(str string, ftype FieldType) { + len := copy(cmd.dataBuffer[(cmd.dataOffset+int(_FIELD_HEADER_SIZE)):], str) + cmd.writeFieldHeader(len, ftype) + cmd.dataOffset += len +} + +func (cmd *baseCommand) writeFieldBytes(bytes []byte, ftype FieldType) { + copy(cmd.dataBuffer[cmd.dataOffset+int(_FIELD_HEADER_SIZE):], bytes) + + cmd.writeFieldHeader(len(bytes), ftype) + cmd.dataOffset += len(bytes) +} + +func (cmd *baseCommand) writeFieldHeader(size int, ftype FieldType) { + cmd.WriteInt32(int32(size + 1)) + cmd.WriteByte((byte(ftype))) +} + +// Int64ToBytes converts an int64 into slice of Bytes. 
+func (cmd *baseCommand) WriteInt64(num int64) (int, error) { + return cmd.WriteUint64(uint64(num)) +} + +// Uint64ToBytes converts an uint64 into slice of Bytes. +func (cmd *baseCommand) WriteUint64(num uint64) (int, error) { + binary.BigEndian.PutUint64(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+8], num) + cmd.dataOffset += 8 + return 8, nil +} + +// Int32ToBytes converts an int32 to a byte slice of size 4 +func (cmd *baseCommand) WriteInt32(num int32) (int, error) { + return cmd.WriteUint32(uint32(num)) +} + +// Uint32ToBytes converts an uint32 to a byte slice of size 4 +func (cmd *baseCommand) WriteUint32(num uint32) (int, error) { + binary.BigEndian.PutUint32(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+4], num) + cmd.dataOffset += 4 + return 4, nil +} + +// Int16ToBytes converts an int16 to slice of bytes +func (cmd *baseCommand) WriteInt16(num int16) (int, error) { + return cmd.WriteUint16(uint16(num)) +} + +// Int16ToBytes converts an int16 to slice of bytes +func (cmd *baseCommand) WriteUint16(num uint16) (int, error) { + binary.BigEndian.PutUint16(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+2], num) + cmd.dataOffset += 2 + return 2, nil +} + +func (cmd *baseCommand) WriteFloat32(float float32) (int, error) { + bits := math.Float32bits(float) + binary.BigEndian.PutUint32(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+4], bits) + cmd.dataOffset += 4 + return 4, nil +} + +func (cmd *baseCommand) WriteFloat64(float float64) (int, error) { + bits := math.Float64bits(float) + binary.BigEndian.PutUint64(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+8], bits) + cmd.dataOffset += 8 + return 8, nil +} + +func (cmd *baseCommand) WriteByte(b byte) error { + cmd.dataBuffer[cmd.dataOffset] = b + cmd.dataOffset++ + return nil +} + +func (cmd *baseCommand) WriteString(s string) (int, error) { + copy(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+len(s)], s) + cmd.dataOffset += len(s) + return len(s), nil +} + +func (cmd *baseCommand) Write(b []byte) (int, error) { + 
copy(cmd.dataBuffer[cmd.dataOffset:cmd.dataOffset+len(b)], b) + cmd.dataOffset += len(b) + return len(b), nil +} + +func (cmd *baseCommand) begin() { + cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE) +} + +func (cmd *baseCommand) sizeBuffer() error { + return cmd.sizeBufferSz(cmd.dataOffset) +} + +func (cmd *baseCommand) validateHeader(header int64) error { + msgVersion := (uint64(header) & 0xFF00000000000000) >> 56 + if msgVersion != 2 { + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid Message Header: Expected version to be 2, but got %v", msgVersion)) + } + + msgType := uint64((uint64(header) & 0x00FF000000000000)) >> 49 + if !(msgType == 1 || msgType == 3) { + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid Message Header: Expected type to be 1 or 3, but got %v", msgType)) + } + + msgSize := int64((header & 0x0000FFFFFFFFFFFF)) + if msgSize > int64(MaxBufferSize) { + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid Message Header: Expected size to be under 10MiB, but got %v", msgSize)) + } + + return nil +} + +var ( + // MaxBufferSize protects against allocating massive memory blocks + // for buffers. Tweak this number if you are returning a lot of + // LDT elements in your queries. + MaxBufferSize = 1024 * 1024 * 10 // 10 MB +) + +func (cmd *baseCommand) sizeBufferSz(size int) error { + // Corrupted data streams can result in a huge length. + // Do a sanity check here. 
+ if size > MaxBufferSize || size < 0 { + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid size for buffer: %d", size)) + } + + if size <= len(cmd.dataBuffer) { + // don't touch the buffer + } else if size <= cap(cmd.dataBuffer) { + cmd.dataBuffer = cmd.dataBuffer[:size] + } else { + // not enough space + cmd.dataBuffer = make([]byte, size) + } + + return nil +} + +func (cmd *baseCommand) end() { + var size = int64(cmd.dataOffset-8) | (_CL_MSG_VERSION << 56) | (_AS_MSG_TYPE << 48) + // Buffer.Int64ToBytes(size, cmd.dataBuffer, 0) + binary.BigEndian.PutUint64(cmd.dataBuffer[0:], uint64(size)) +} + +//////////////////////////////////// + +// SetCommandBufferPool can be used to customize the command Buffer Pool parameters to calibrate +// the pool for different workloads +// This method is deprecated. +func SetCommandBufferPool(poolSize, initBufSize, maxBufferSize int) { + panic("There is no need to optimize the buffer pool anymore. Buffers have moved to Connection object.") +} + +func (cmd *baseCommand) execute(ifc command) (err error) { + policy := ifc.getPolicy(ifc).GetBasePolicy() + iterations := -1 + + // for exponential backoff + interval := policy.SleepBetweenRetries + + // set timeout outside the loop + deadline := time.Now().Add(policy.Timeout) + + // Execute command until successful, timed out or maximum iterations have been reached. + for { + // too many retries + if iterations++; (policy.MaxRetries <= 0 && iterations > 0) || (policy.MaxRetries > 0 && iterations > policy.MaxRetries) { + return NewAerospikeError(TIMEOUT, fmt.Sprintf("command execution timed out: Exceeded number of retries. See `Policy.MaxRetries`. 
(last error: %s)", err)) + } + + // Sleep before trying again, after the first iteration + if iterations > 0 && policy.SleepBetweenRetries > 0 { + time.Sleep(interval) + if policy.SleepMultiplier > 1 { + interval = time.Duration(float64(interval) * policy.SleepMultiplier) + } + } + + // check for command timeout + if policy.Timeout > 0 && time.Now().After(deadline) { + break + } + + // set command node, so when you return a record it has the node + cmd.node, err = ifc.getNode(ifc) + if cmd.node == nil || !cmd.node.IsActive() || err != nil { + // Node is currently inactive. Retry. + continue + } + + // cmd.conn, err = cmd.node.GetConnection(policy.Timeout) + cmd.conn, err = ifc.getConnection(policy.Timeout) + if err != nil { + Logger.Warn("Node " + cmd.node.String() + ": " + err.Error()) + continue + } + + // Assign the connection buffer to the command buffer + cmd.dataBuffer = cmd.conn.dataBuffer + + // Set command buffer. + err = ifc.writeBuffer(ifc) + if err != nil { + // All runtime exceptions are considered fatal. Do not retry. + // Close socket to flush out possible garbage. Do not put back in pool. + cmd.conn.Close() + return err + } + + // Reset timeout in send buffer (destined for server) and socket. + // Buffer.Int32ToBytes(int32(policy.Timeout/time.Millisecond), cmd.dataBuffer, 22) + binary.BigEndian.PutUint32(cmd.dataBuffer[22:], uint32(policy.Timeout/time.Millisecond)) + + // Send command. + _, err = cmd.conn.Write(cmd.dataBuffer[:cmd.dataOffset]) + if err != nil { + // IO errors are considered temporary anomalies. Retry. + // Close socket to flush out possible garbage. Do not put back in pool. + cmd.conn.Close() + + Logger.Warn("Node " + cmd.node.String() + ": " + err.Error()) + continue + } + + // Parse results. + err = ifc.parseResult(ifc, cmd.conn) + if err != nil { + if err == io.EOF { + // IO errors are considered temporary anomalies. Retry. + // Close socket to flush out possible garbage. Do not put back in pool. 
+ cmd.conn.Close() + + Logger.Warn("Node " + cmd.node.String() + ": " + err.Error()) + + // retry only for non-streaming commands + if !cmd.oneShot { + continue + } + } + + // close the connection + // cancelling/closing the batch/multi commands will return an error, which will + // close the connection to throw away its data and signal the server about the + // situation. We will not put back the connection in the buffer. + if cmd.conn.IsConnected() && KeepConnection(err) { + // Put connection back in pool. + cmd.node.PutConnection(cmd.conn) + } else { + cmd.conn.Close() + + } + return err + } + + // in case it has grown and re-allocated + cmd.conn.dataBuffer = cmd.dataBuffer + + // Put connection back in pool. + // cmd.node.PutConnection(cmd.conn) + ifc.putConnection(cmd.conn) + + // command has completed successfully. Exit method. + return nil + + } + + // execution timeout + return NewAerospikeError(TIMEOUT, "command execution timed out: See `Policy.Timeout`") +} + +func (cmd *baseCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) { + panic(errors.New("Abstract method. Should not end up here")) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/commit_policy.go b/vendor/github.com/aerospike/aerospike-client-go/commit_policy.go new file mode 100644 index 00000000000..6b0f5be03db --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/commit_policy.go @@ -0,0 +1,29 @@ +/* + * Copyright 2013-2017 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package aerospike + +// CommitLevel indicates the desired consistency guarantee when committing a transaction on the server. +type CommitLevel int + +const ( + // COMMIT_ALL indicates the server should wait until successfully committing master and all replicas. + COMMIT_ALL CommitLevel = iota + + // COMMIT_MASTER indicates the server should wait until successfully committing master only. + COMMIT_MASTER +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/compat_after_go1.8.go b/vendor/github.com/aerospike/aerospike-client-go/compat_after_go1.8.go new file mode 100644 index 00000000000..101f05d8c39 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/compat_after_go1.8.go @@ -0,0 +1,25 @@ +// +build go1.8 + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aerospike + +import ( + "crypto/tls" +) + +func cloneTlsConfig(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/compat_before_go1.8.go b/vendor/github.com/aerospike/aerospike-client-go/compat_before_go1.8.go new file mode 100644 index 00000000000..26bd1b783d7 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/compat_before_go1.8.go @@ -0,0 +1,44 @@ +// +build !go1.8 + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aerospike + +import ( + "crypto/tls" +) + +func cloneTlsConfig(c *tls.Config) *tls.Config { + // .Clone() method is not available in go versions before 1.8 + return &tls.Config{ + Certificates: c.Certificates, + CipherSuites: c.CipherSuites, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + ClientSessionCache: c.ClientSessionCache, + CurvePreferences: c.CurvePreferences, + GetCertificate: c.GetCertificate, + InsecureSkipVerify: c.InsecureSkipVerify, + MaxVersion: c.MaxVersion, + MinVersion: c.MinVersion, + NameToCertificate: c.NameToCertificate, + NextProtos: c.NextProtos, + PreferServerCipherSuites: c.PreferServerCipherSuites, + Rand: c.Rand, + RootCAs: c.RootCAs, + ServerName: c.ServerName, + Time: c.Time, + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/connection.go b/vendor/github.com/aerospike/aerospike-client-go/connection.go new file mode 100644 index 00000000000..42fe2bafeb5 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/connection.go @@ -0,0 +1,257 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "crypto/tls" + "io" + "net" + "strconv" + "time" + + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" +) + +// Connection represents a connection with a timeout. 
+type Connection struct { + node *Node + + // timeout + timeout time.Duration + + // duration after which connection is considered idle + idleTimeout time.Duration + idleDeadline time.Time + + // connection object + conn net.Conn + + // to avoid having a buffer pool and contention + dataBuffer []byte +} + +func errToTimeoutErr(err error) error { + if err, ok := err.(net.Error); ok && err.Timeout() { + return NewAerospikeError(TIMEOUT, err.Error()) + } + return err +} + +func shouldClose(err error) bool { + if err == io.EOF { + return true + } + + if err, ok := err.(net.Error); ok && err.Timeout() { + return true + } + + return false +} + +// NewConnection creates a connection on the network and returns the pointer +// A default timeout of 5 seconds is applied if no timeout is specified. +// If the connection is not established in the specified timeout, +// an error will be returned +func NewConnection(address string, timeout time.Duration) (*Connection, error) { + newConn := &Connection{dataBuffer: make([]byte, 1024)} + + // don't wait indefinitely + if timeout == 0 { + timeout = 5 * time.Second + } + + conn, err := net.DialTimeout("tcp", address, timeout) + if err != nil { + Logger.Error("Connection to address `" + address + "` failed to establish with error: " + err.Error()) + return nil, errToTimeoutErr(err) + } + newConn.conn = conn + + // set timeout at the last possible moment + if err := newConn.SetTimeout(timeout); err != nil { + return nil, err + } + return newConn, nil +} + +// NewSecureConnection creates a TLS connection on the network and returns the pointer. +// A default timeout of 5 seconds is applied if no timeout is specified. 
+// If the connection is not established in the specified timeout, +// an error will be returned +func NewSecureConnection(policy *ClientPolicy, host *Host) (*Connection, error) { + address := net.JoinHostPort(host.Name, strconv.Itoa(host.Port)) + conn, err := NewConnection(address, policy.Timeout) + if err != nil { + return nil, err + } + + if policy.TlsConfig == nil { + return conn, nil + } + + // Use version dependent clone function to clone the config + tlsConfig := cloneTlsConfig(policy.TlsConfig) + tlsConfig.ServerName = host.TLSName + + sconn := tls.Client(conn.conn, tlsConfig) + if err := sconn.Handshake(); err != nil { + sconn.Close() + return nil, err + } + + if host.TLSName != "" && !tlsConfig.InsecureSkipVerify { + if err := sconn.VerifyHostname(host.TLSName); err != nil { + sconn.Close() + Logger.Error("Connection to address `" + address + "` failed to establish with error: " + err.Error()) + return nil, errToTimeoutErr(err) + } + } + + conn.conn = sconn + return conn, nil +} + +// Write writes the slice to the connection buffer. +func (ctn *Connection) Write(buf []byte) (total int, err error) { + // make sure all bytes are written + // Don't worry about the loop, timeout has been set elsewhere + length := len(buf) + var r int + for total < length { + if r, err = ctn.conn.Write(buf[total:]); err != nil { + break + } + total += r + } + + if err == nil { + return total, nil + } + return total, errToTimeoutErr(err) +} + +// ReadN reads N bytes from connection buffer to the provided Writer. 
+func (ctn *Connection) ReadN(buf io.Writer, length int64) (total int64, err error) { + // if all bytes are not read, retry until successful + // Don't worry about the loop; we've already set the timeout elsewhere + total, err = io.CopyN(buf, ctn.conn, length) + + if err == nil && total == length { + return total, nil + } else if err != nil { + if shouldClose(err) { + ctn.Close() + } + return total, errToTimeoutErr(err) + } + ctn.Close() + return total, NewAerospikeError(SERVER_ERROR) +} + +// Read reads from connection buffer to the provided slice. +func (ctn *Connection) Read(buf []byte, length int) (total int, err error) { + // if all bytes are not read, retry until successful + // Don't worry about the loop; we've already set the timeout elsewhere + var r int + for total < length { + r, err = ctn.conn.Read(buf[total:length]) + total += r + if err != nil { + break + } + } + + if err == nil && total == length { + return total, nil + } else if err != nil { + if shouldClose(err) { + ctn.Close() + } + return total, errToTimeoutErr(err) + } + ctn.Close() + return total, NewAerospikeError(SERVER_ERROR) +} + +// IsConnected returns true if the connection is not closed yet. +func (ctn *Connection) IsConnected() bool { + return ctn.conn != nil +} + +// SetTimeout sets connection timeout for both read and write operations. 
+func (ctn *Connection) SetTimeout(timeout time.Duration) error { + // Set timeout ONLY if there is or has been a timeout + if timeout > 0 || ctn.timeout != 0 { + ctn.timeout = timeout + + // important: remove deadline when not needed; connections are pooled + if ctn.conn != nil { + var deadline time.Time + if timeout > 0 { + deadline = time.Now().Add(timeout) + } + if err := ctn.conn.SetDeadline(deadline); err != nil { + return err + } + } + } + + return nil +} + +// Close closes the connection +func (ctn *Connection) Close() { + if ctn != nil && ctn.conn != nil { + // deregister + if ctn.node != nil { + ctn.node.connectionCount.DecrementAndGet() + } + + if err := ctn.conn.Close(); err != nil { + Logger.Warn(err.Error()) + } + ctn.conn = nil + } +} + +// Authenticate will send authentication information to the server. +func (ctn *Connection) Authenticate(user string, password []byte) error { + // need to authenticate + if user != "" { + command := newAdminCommand(ctn.dataBuffer) + if err := command.authenticate(ctn, user, password); err != nil { + // Socket not authenticated. Do not put back into pool. + return err + } + } + return nil +} + +// setIdleTimeout sets the idle timeout for the connection. +func (ctn *Connection) setIdleTimeout(timeout time.Duration) { + ctn.idleTimeout = timeout +} + +// isIdle returns true if the connection has reached the idle deadline. +func (ctn *Connection) isIdle() bool { + return ctn.idleTimeout > 0 && !time.Now().Before(ctn.idleDeadline) +} + +// refresh extends the idle deadline of the connection. 
+func (ctn *Connection) refresh() { + ctn.idleDeadline = time.Now().Add(ctn.idleTimeout) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/connection_queue.go b/vendor/github.com/aerospike/aerospike-client-go/connection_queue.go new file mode 100644 index 00000000000..87f995bd480 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/connection_queue.go @@ -0,0 +1,163 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "runtime" + "sync" +) + +// singleConnectionQueue is a non-blocking FIFO queue. +// If the queue is empty, nil is returned. +// if the queue is full, offer will return false +type singleConnectionQueue struct { + head, tail uint32 + data []*Connection + size uint32 + wrapped bool + mutex sync.Mutex +} + +// NewQueue creates a new queue with initial size. +func newSingleConnectionQueue(size int) *singleConnectionQueue { + if size <= 0 { + panic("Queue size cannot be less than 1") + } + + return &singleConnectionQueue{ + wrapped: false, + data: make([]*Connection, uint32(size)), + size: uint32(size), + } +} + +// Offer adds an item to the queue unless the queue is full. 
+// In case the queue is full, the item will not be added to the queue +// and false will be returned +func (q *singleConnectionQueue) Offer(conn *Connection) bool { + q.mutex.Lock() + + // make sure queue is not full + if q.tail == q.head && q.wrapped { + q.mutex.Unlock() + return false + } + + if q.head+1 == q.size { + q.wrapped = true + } + + q.head = (q.head + 1) % q.size + q.data[q.head] = conn + q.mutex.Unlock() + return true +} + +// Poll removes and returns an item from the queue. +// If the queue is empty, nil will be returned. +func (q *singleConnectionQueue) Poll() (res *Connection) { + q.mutex.Lock() + + // if queue is not empty + if q.wrapped || (q.tail != q.head) { + if q.tail+1 == q.size { + q.wrapped = false + } + q.tail = (q.tail + 1) % q.size + res = q.data[q.tail] + } + + q.mutex.Unlock() + return res +} + +// singleConnectionQueue is a non-blocking FIFO queue. +// If the queue is empty, nil is returned. +// if the queue is full, offer will return false +type connectionQueue struct { + queues []singleConnectionQueue +} + +func newConnectionQueue(size int) *connectionQueue { + queueCount := runtime.NumCPU() + if queueCount > size { + queueCount = size + } + + // will be >= 1 + perQueueSize := size / queueCount + + queues := make([]singleConnectionQueue, queueCount) + for i := range queues { + queues[i] = *newSingleConnectionQueue(perQueueSize) + } + + // add a queue for the remainder + if (perQueueSize*queueCount)-size > 0 { + queues = append(queues, *newSingleConnectionQueue(size - queueCount*perQueueSize)) + } + + return &connectionQueue{ + queues: queues, + } +} + +// Offer adds an item to the queue unless the queue is full. 
+// In case the queue is full, the item will not be added to the queue +// and false will be returned +func (q *connectionQueue) Offer(conn *Connection, hint byte) bool { + idx := int(hint) % len(q.queues) + end := idx + len(q.queues) + for i := idx; i < end; i++ { + if success := q.queues[i%len(q.queues)].Offer(conn); success { + return true + } + } + return false +} + +// Poll removes and returns an item from the queue. +// If the queue is empty, nil will be returned. +func (q *connectionQueue) Poll(hint byte) (res *Connection) { + // fmt.Println(int(hint) % len(q.queues)) + + idx := int(hint) + + end := idx + len(q.queues) + for i := idx; i < end; i++ { + if conn := q.queues[i%len(q.queues)].Poll(); conn != nil { + return conn + } + } + return nil +} + +// DropIdle closes all idle connections. +func (q *connectionQueue) DropIdle() { +L: + for i := 0; i < len(q.queues); i++ { + for conn := q.queues[i].Poll(); conn != nil; conn = q.queues[i].Poll() { + if conn.IsConnected() && !conn.isIdle() { + // put it back: this connection is the oldest, and is still fresh + // so the ones after it are likely also fresh + if !q.queues[i].Offer(conn) { + conn.Close() + } + continue L + } + conn.Close() + } + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/consistency_level.go b/vendor/github.com/aerospike/aerospike-client-go/consistency_level.go new file mode 100644 index 00000000000..df90e554681 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/consistency_level.go @@ -0,0 +1,32 @@ +/* + * Copyright 2013-2017 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package aerospike + +// ConsistencyLevel indicates how replicas should be consulted in a read +// operation to provide the desired consistency guarantee. +type ConsistencyLevel int + +const ( + // CONSISTENCY_ONE indicates only a single replica should be consulted in + // the read operation. + CONSISTENCY_ONE = iota + + // CONSISTENCY_ALL indicates that all replicas should be consulted in + // the read operation. + CONSISTENCY_ALL +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/delete_command.go b/vendor/github.com/aerospike/aerospike-client-go/delete_command.go new file mode 100644 index 00000000000..1cc0676322a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/delete_command.go @@ -0,0 +1,82 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// guarantee deleteCommand implements command interface +var _ command = &deleteCommand{} + +type deleteCommand struct { + singleCommand + + policy *WritePolicy + existed bool +} + +func newDeleteCommand(cluster *Cluster, policy *WritePolicy, key *Key) *deleteCommand { + newDeleteCmd := &deleteCommand{ + singleCommand: newSingleCommand(cluster, key), + policy: policy, + } + + return newDeleteCmd +} + +func (cmd *deleteCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *deleteCommand) writeBuffer(ifc command) error { + return cmd.setDelete(cmd.policy, cmd.key) +} + +func (cmd *deleteCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getMasterNode(&cmd.partition) +} + +func (cmd *deleteCommand) parseResult(ifc command, conn *Connection) error { + // Read header. + if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil { + return err + } + + header := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(header); err != nil { + return err + } + + resultCode := cmd.dataBuffer[13] & 0xFF + + if resultCode != 0 && ResultCode(resultCode) != KEY_NOT_FOUND_ERROR { + return NewAerospikeError(ResultCode(resultCode)) + } + cmd.existed = resultCode == 0 + + return cmd.emptySocket(conn) +} + +func (cmd *deleteCommand) Existed() bool { + return cmd.existed +} + +func (cmd *deleteCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/execute_command.go b/vendor/github.com/aerospike/aerospike-client-go/execute_command.go new file mode 100644 index 00000000000..c769ae0b865 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/execute_command.go @@ -0,0 +1,54 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +type executeCommand struct { + readCommand + + // overwrite + policy *WritePolicy + packageName string + functionName string + args *ValueArray +} + +func newExecuteCommand( + cluster *Cluster, + policy *WritePolicy, + key *Key, + packageName string, + functionName string, + args *ValueArray, +) executeCommand { + return executeCommand{ + readCommand: newReadCommand(cluster, &policy.BasePolicy, key, nil), + policy: policy, + packageName: packageName, + functionName: functionName, + args: args, + } +} + +func (cmd *executeCommand) writeBuffer(ifc command) error { + return cmd.setUdf(cmd.policy, cmd.key, cmd.packageName, cmd.functionName, cmd.args) +} + +func (cmd *executeCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getMasterNode(&cmd.partition) +} + +func (cmd *executeCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/execute_task.go b/vendor/github.com/aerospike/aerospike-client-go/execute_task.go new file mode 100644 index 00000000000..ac1b9701020 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/execute_task.go @@ -0,0 +1,104 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "strconv" + "strings" + + . "github.com/aerospike/aerospike-client-go/types" +) + +// ExecuteTask is used to poll for long running server execute job completion. +type ExecuteTask struct { + *baseTask + + taskId uint64 + scan bool +} + +// NewExecuteTask initializes task with fields needed to query server nodes. +func NewExecuteTask(cluster *Cluster, statement *Statement) *ExecuteTask { + return &ExecuteTask{ + baseTask: newTask(cluster, false), + taskId: statement.TaskId, + scan: statement.IsScan(), + } +} + +// IsDone queries all nodes for task completion status. +func (etsk *ExecuteTask) IsDone() (bool, error) { + var module string + if etsk.scan { + module = "scan" + } else { + module = "query" + } + + command := "jobs:module=" + module + ";cmd=get-job;trid=" + strconv.FormatUint(etsk.taskId, 10) + + nodes := etsk.cluster.GetNodes() + + for _, node := range nodes { + responseMap, err := node.RequestInfo(command) + if err != nil { + return false, err + } + response := responseMap[command] + + if strings.HasPrefix(response, "ERROR:2") { + // Task not found. This could mean task already completed or + // task not started yet. We are going to have to assume that + // the task already completed... + continue + } + + if strings.HasPrefix(response, "ERROR:") { + // Mark done and quit immediately. 
+ return false, NewAerospikeError(UDF_BAD_RESPONSE, response) + } + + find := "status=" + index := strings.Index(response, find) + + if index < 0 { + return false, nil + } + + begin := index + len(find) + response = response[begin:] + find = ":" + index = strings.Index(response, find) + + if index < 0 { + continue + } + + status := strings.ToLower(response[:index]) + if !strings.HasPrefix(status, "done") { + return false, nil + } + } + + return true, nil +} + +// OnComplete returns a channel which will be closed when the task is +// completed. +// If an error is encountered while performing the task, an error +// will be sent on the channel. +func (etsk *ExecuteTask) OnComplete() chan error { + return etsk.onComplete(etsk) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/exists_command.go b/vendor/github.com/aerospike/aerospike-client-go/exists_command.go new file mode 100644 index 00000000000..fa3724ef01e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/exists_command.go @@ -0,0 +1,79 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// guarantee existsCommand implements command interface +var _ command = &existsCommand{} + +type existsCommand struct { + singleCommand + + policy *BasePolicy + exists bool +} + +func newExistsCommand(cluster *Cluster, policy *BasePolicy, key *Key) *existsCommand { + return &existsCommand{ + singleCommand: newSingleCommand(cluster, key), + policy: policy, + } +} + +func (cmd *existsCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *existsCommand) writeBuffer(ifc command) error { + return cmd.setExists(cmd.policy, cmd.key) +} + +func (cmd *existsCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getReadNode(&cmd.partition, cmd.policy.ReplicaPolicy) +} + +func (cmd *existsCommand) parseResult(ifc command, conn *Connection) error { + // Read header. + if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil { + return err + } + + header := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(header); err != nil { + return err + } + + resultCode := cmd.dataBuffer[13] & 0xFF + + if resultCode != 0 && ResultCode(resultCode) != KEY_NOT_FOUND_ERROR { + return NewAerospikeError(ResultCode(resultCode)) + } + cmd.exists = resultCode == 0 + return cmd.emptySocket(conn) +} + +func (cmd *existsCommand) Exists() bool { + return cmd.exists +} + +func (cmd *existsCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/field_type.go b/vendor/github.com/aerospike/aerospike-client-go/field_type.go new file mode 100644 index 00000000000..3f00bf78cd8 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/field_type.go @@ -0,0 +1,48 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// FieldType represents the type of the field in Aerospike Wire Protocol +type FieldType int + +// FieldType constants used in the Aerospike Wire Protocol. +const ( + NAMESPACE FieldType = 0 + TABLE FieldType = 1 + KEY FieldType = 2 + + //BIN FieldType = 3; + + DIGEST_RIPE FieldType = 4 + + //GU_TID FieldType = 5; + + DIGEST_RIPE_ARRAY FieldType = 6 + TRAN_ID FieldType = 7 // user supplied transaction id, which is simply passed back + SCAN_OPTIONS FieldType = 8 + SCAN_TIMEOUT FieldType = 9 + INDEX_NAME FieldType = 21 + INDEX_RANGE FieldType = 22 + INDEX_FILTER FieldType = 23 + INDEX_LIMIT FieldType = 24 + INDEX_ORDER_BY FieldType = 25 + INDEX_TYPE = 26 + UDF_PACKAGE_NAME FieldType = 30 + UDF_FUNCTION FieldType = 31 + UDF_ARGLIST FieldType = 32 + UDF_OP FieldType = 33 + QUERY_BINLIST FieldType = 40 + PREDEXP FieldType = 43 +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/filter.go b/vendor/github.com/aerospike/aerospike-client-go/filter.go new file mode 100644 index 00000000000..4664b9f455e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/filter.go @@ -0,0 +1,194 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + + ParticleType "github.com/aerospike/aerospike-client-go/types/particle_type" +) + +// Filter specifies a query filter definition. +type Filter struct { + name string + idxType IndexCollectionType + valueParticleType int + begin Value + end Value +} + +// NewEqualFilter creates a new equality filter instance for query. +func NewEqualFilter(binName string, value interface{}) *Filter { + val := NewValue(value) + return newFilter(binName, ICT_DEFAULT, val.GetType(), val, val) +} + +// NewRangeFilter creates a range filter for query. +// Range arguments must be int64 values. +// String ranges are not supported. +func NewRangeFilter(binName string, begin int64, end int64) *Filter { + vBegin, vEnd := NewValue(begin), NewValue(end) + return newFilter(binName, ICT_DEFAULT, vBegin.GetType(), vBegin, vEnd) +} + +// NewContainsFilter creates a contains filter for query on collection index. +func NewContainsFilter(binName string, indexCollectionType IndexCollectionType, value interface{}) *Filter { + v := NewValue(value) + return newFilter(binName, indexCollectionType, v.GetType(), v, v) +} + +// NewContainsRangeFilter creates a contains filter for query on ranges of data in a collection index. 
+func NewContainsRangeFilter(binName string, indexCollectionType IndexCollectionType, begin, end int64) *Filter { + vBegin, vEnd := NewValue(begin), NewValue(end) + return newFilter(binName, indexCollectionType, vBegin.GetType(), vBegin, vEnd) +} + +// NewGeoWithinRegionFilter creates a geospatial "within region" filter for query. +// Argument must be a valid GeoJSON region. +func NewGeoWithinRegionFilter(binName, region string) *Filter { + v := NewStringValue(region) + return newFilter(binName, ICT_DEFAULT, ParticleType.GEOJSON, v, v) +} + +// NewGeoWithinRegionForCollectionFilter creates a geospatial "within region" filter for query on collection index. +// Argument must be a valid GeoJSON region. +func NewGeoWithinRegionForCollectionFilter(binName string, collectionType IndexCollectionType, region string) *Filter { + v := NewStringValue(region) + return newFilter(binName, collectionType, ParticleType.GEOJSON, v, v) +} + +// NewGeoRegionsContainingPointFilter creates a geospatial "containing point" filter for query. +// Argument must be a valid GeoJSON point. +func NewGeoRegionsContainingPointFilter(binName, point string) *Filter { + v := NewStringValue(point) + return newFilter(binName, ICT_DEFAULT, ParticleType.GEOJSON, v, v) +} + +// NewGeoRegionsContainingPointForCollectionFilter creates a geospatial "containing point" filter for query on collection index. +// Argument must be a valid GeoJSON point. +func NewGeoRegionsContainingPointForCollectionFilter(binName string, collectionType IndexCollectionType, point string) *Filter { + v := NewStringValue(point) + return newFilter(binName, collectionType, ParticleType.GEOJSON, v, v) +} + +// NewGeoWithinRadiusFilter creates a geospatial "within radius" filter for query. +// Arguments must be valid longitude/latitude/radius (meters) values. 
+func NewGeoWithinRadiusFilter(binName string, lng, lat, radius float64) *Filter { + rgnStr := fmt.Sprintf("{ \"type\": \"AeroCircle\", "+"\"coordinates\": [[%.8f, %.8f], %f] }", lng, lat, radius) + return newFilter(binName, ICT_DEFAULT, ParticleType.GEOJSON, NewValue(rgnStr), NewValue(rgnStr)) +} + +// NewGeoWithinRadiusForCollectionFilter creates a geospatial "within radius" filter for query on collection index. +// Arguments must be valid longitude/latitude/radius (meters) values. +func NewGeoWithinRadiusForCollectionFilter(binName string, collectionType IndexCollectionType, lng, lat, radius float64) *Filter { + rgnStr := fmt.Sprintf("{ \"type\": \"AeroCircle\", "+"\"coordinates\": [[%.8f, %.8f], %f] }", lng, lat, radius) + return newFilter(binName, collectionType, ParticleType.GEOJSON, NewValue(rgnStr), NewValue(rgnStr)) +} + +// Create a filter for query. +// Range arguments must be longs or integers which can be cast to longs. +// String ranges are not supported. +func newFilter(name string, indexCollectionType IndexCollectionType, valueParticleType int, begin Value, end Value) *Filter { + return &Filter{ + name: name, + idxType: indexCollectionType, + valueParticleType: valueParticleType, + begin: begin, + end: end, + } +} + +// IndexType return filter's index type. 
+func (fltr *Filter) IndexCollectionType() IndexCollectionType { + return fltr.idxType +} + +func (fltr *Filter) estimateSize() (int, error) { + // bin name size(1) + particle type size(1) + begin particle size(4) + end particle size(4) = 10 + szBegin, err := fltr.begin.estimateSize() + if err != nil { + return szBegin, err + } + + szEnd, err := fltr.end.estimateSize() + if err != nil { + return szEnd, err + } + + return len(fltr.name) + szBegin + szEnd + 10, nil +} + +func (fltr *Filter) write(cmd *baseCommand) (int, error) { + size := 0 + + // Write name length + err := cmd.WriteByte(byte(len(fltr.name))) + if err != nil { + return 0, err + } + size++ + + // Write Name + n, err := cmd.WriteString(fltr.name) + if err != nil { + return size + n, err + } + size += n + + // Write particle type. + err = cmd.WriteByte(byte(fltr.valueParticleType)) + if err != nil { + return size, err + } + size++ + + // Write filter begin. + esz, err := fltr.begin.estimateSize() + if err != nil { + return size, err + } + + n, err = cmd.WriteInt32(int32(esz)) + if err != nil { + return size + n, err + } + size += n + + n, err = fltr.begin.write(cmd) + if err != nil { + return size + n, err + } + size += n + + // Write filter end. + esz, err = fltr.end.estimateSize() + if err != nil { + return size, err + } + + n, err = cmd.WriteInt32(int32(esz)) + if err != nil { + return size + n, err + } + size += n + + n, err = fltr.end.write(cmd) + if err != nil { + return size + n, err + } + size += n + + return size, nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/generation_policy.go b/vendor/github.com/aerospike/aerospike-client-go/generation_policy.go new file mode 100644 index 00000000000..eab652281df --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/generation_policy.go @@ -0,0 +1,30 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// GenerationPolicy determines how to handle record writes based on record generation. +type GenerationPolicy int + +const ( + // NONE means: Do not use record generation to restrict writes. + NONE GenerationPolicy = iota + + // EXPECT_GEN_EQUAL means: Update/Delete record if expected generation is equal to server generation. Otherwise, fail. + EXPECT_GEN_EQUAL + + // EXPECT_GEN_GT means: Update/Delete record if expected generation greater than the server generation. Otherwise, fail. + // This is useful for restore after backup. + EXPECT_GEN_GT +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/generics.go b/vendor/github.com/aerospike/aerospike-client-go/generics.go new file mode 100644 index 00000000000..c175f3872e7 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/generics.go @@ -0,0 +1,3643 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +type stringSlice []string + +// PackList packs StringSlice as msgpack. +func (ts stringSlice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackString(buf, elem) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of StringSlice +func (ts stringSlice) Len() int { + return len(ts) +} + +type intSlice []int + +// PackList packs IntSlice as msgpack. +func (ts intSlice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of IntSlice +func (ts intSlice) Len() int { + return len(ts) +} + +type int8Slice []int8 + +// PackList packs Int8Slice as msgpack. +func (ts int8Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Int8Slice +func (ts int8Slice) Len() int { + return len(ts) +} + +type int16Slice []int16 + +// PackList packs Int16Slice as msgpack. +func (ts int16Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Int16Slice +func (ts int16Slice) Len() int { + return len(ts) +} + +type int32Slice []int32 + +// PackList packs Int32Slice as msgpack. 
+func (ts int32Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Int32Slice +func (ts int32Slice) Len() int { + return len(ts) +} + +type int64Slice []int64 + +// PackList packs Int64Slice as msgpack. +func (ts int64Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Int64Slice +func (ts int64Slice) Len() int { + return len(ts) +} + +type uint16Slice []uint16 + +// PackList packs Uint16Slice as msgpack. +func (ts uint16Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Uint16Slice +func (ts uint16Slice) Len() int { + return len(ts) +} + +type uint32Slice []uint32 + +// PackList packs Uint32Slice as msgpack. +func (ts uint32Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackInt64(buf, int64(elem)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Uint32Slice +func (ts uint32Slice) Len() int { + return len(ts) +} + +type uint64Slice []uint64 + +// PackList packs Uint64Slice as msgpack. +func (ts uint64Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackUInt64(buf, elem) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Uint64Slice +func (ts uint64Slice) Len() int { + return len(ts) +} + +type float32Slice []float32 + +// PackList packs Float32Slice as msgpack. 
+func (ts float32Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackFloat32(buf, elem) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Float32Slice +func (ts float32Slice) Len() int { + return len(ts) +} + +type float64Slice []float64 + +// PackList packs Float64Slice as msgpack. +func (ts float64Slice) PackList(buf BufferEx) (int, error) { + size := 0 + for _, elem := range ts { + n, err := PackFloat64(buf, elem) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of Float64Slice +func (ts float64Slice) Len() int { + return len(ts) +} + +/////////////////////////////////////////////////////////////////////////////////////////// + +type stringStringMap map[string]string + +//PackMap packs TypeMap as msgpack. +func (tm stringStringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringStringMap) Len() int { + return len(tm) +} + +type stringIntMap map[string]int + +//PackMap packs TypeMap as msgpack. +func (tm stringIntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringIntMap) Len() int { + return len(tm) +} + +type stringInt8Map map[string]int8 + +//PackMap packs TypeMap as msgpack. 
+func (tm stringInt8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringInt8Map) Len() int { + return len(tm) +} + +type stringInt16Map map[string]int16 + +//PackMap packs TypeMap as msgpack. +func (tm stringInt16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringInt16Map) Len() int { + return len(tm) +} + +type stringInt32Map map[string]int32 + +//PackMap packs TypeMap as msgpack. +func (tm stringInt32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringInt32Map) Len() int { + return len(tm) +} + +type stringInt64Map map[string]int64 + +//PackMap packs TypeMap as msgpack. +func (tm stringInt64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringInt64Map) Len() int { + return len(tm) +} + +type stringUint16Map map[string]uint16 + +//PackMap packs TypeMap as msgpack. 
+func (tm stringUint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringUint16Map) Len() int { + return len(tm) +} + +type stringUint32Map map[string]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm stringUint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringUint32Map) Len() int { + return len(tm) +} + +type stringFloat32Map map[string]float32 + +//PackMap packs TypeMap as msgpack. +func (tm stringFloat32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringFloat32Map) Len() int { + return len(tm) +} + +type stringFloat64Map map[string]float64 + +//PackMap packs TypeMap as msgpack. +func (tm stringFloat64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringFloat64Map) Len() int { + return len(tm) +} + +type intStringMap map[int]string + +//PackMap packs TypeMap as msgpack. 
+func (tm intStringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intStringMap) Len() int { + return len(tm) +} + +type intIntMap map[int]int + +//PackMap packs TypeMap as msgpack. +func (tm intIntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intIntMap) Len() int { + return len(tm) +} + +type intInt8Map map[int]int8 + +//PackMap packs TypeMap as msgpack. +func (tm intInt8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intInt8Map) Len() int { + return len(tm) +} + +type intInt16Map map[int]int16 + +//PackMap packs TypeMap as msgpack. +func (tm intInt16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intInt16Map) Len() int { + return len(tm) +} + +type intInt32Map map[int]int32 + +//PackMap packs TypeMap as msgpack. 
+func (tm intInt32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intInt32Map) Len() int { + return len(tm) +} + +type intInt64Map map[int]int64 + +//PackMap packs TypeMap as msgpack. +func (tm intInt64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intInt64Map) Len() int { + return len(tm) +} + +type intUint16Map map[int]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm intUint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intUint16Map) Len() int { + return len(tm) +} + +type intUint32Map map[int]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm intUint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intUint32Map) Len() int { + return len(tm) +} + +type intFloat32Map map[int]float32 + +//PackMap packs TypeMap as msgpack. 
+func (tm intFloat32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intFloat32Map) Len() int { + return len(tm) +} + +type intFloat64Map map[int]float64 + +//PackMap packs TypeMap as msgpack. +func (tm intFloat64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intFloat64Map) Len() int { + return len(tm) +} + +type intInterfaceMap map[int]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm intInterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intInterfaceMap) Len() int { + return len(tm) +} + +type int8StringMap map[int8]string + +//PackMap packs TypeMap as msgpack. +func (tm int8StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8StringMap) Len() int { + return len(tm) +} + +type int8IntMap map[int8]int + +//PackMap packs TypeMap as msgpack. 
+func (tm int8IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8IntMap) Len() int { + return len(tm) +} + +type int8Int8Map map[int8]int8 + +//PackMap packs TypeMap as msgpack. +func (tm int8Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Int8Map) Len() int { + return len(tm) +} + +type int8Int16Map map[int8]int16 + +//PackMap packs TypeMap as msgpack. +func (tm int8Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Int16Map) Len() int { + return len(tm) +} + +type int8Int32Map map[int8]int32 + +//PackMap packs TypeMap as msgpack. +func (tm int8Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Int32Map) Len() int { + return len(tm) +} + +type int8Int64Map map[int8]int64 + +//PackMap packs TypeMap as msgpack. 
+func (tm int8Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Int64Map) Len() int { + return len(tm) +} + +type int8Uint16Map map[int8]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm int8Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Uint16Map) Len() int { + return len(tm) +} + +type int8Uint32Map map[int8]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm int8Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Uint32Map) Len() int { + return len(tm) +} + +type int8Float32Map map[int8]float32 + +//PackMap packs TypeMap as msgpack. +func (tm int8Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Float32Map) Len() int { + return len(tm) +} + +type int8Float64Map map[int8]float64 + +//PackMap packs TypeMap as msgpack. 
+func (tm int8Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Float64Map) Len() int { + return len(tm) +} + +type int8InterfaceMap map[int8]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm int8InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8InterfaceMap) Len() int { + return len(tm) +} + +type int16StringMap map[int16]string + +//PackMap packs TypeMap as msgpack. +func (tm int16StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16StringMap) Len() int { + return len(tm) +} + +type int16IntMap map[int16]int + +//PackMap packs TypeMap as msgpack. +func (tm int16IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16IntMap) Len() int { + return len(tm) +} + +type int16Int8Map map[int16]int8 + +//PackMap packs TypeMap as msgpack. 
+func (tm int16Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Int8Map) Len() int { + return len(tm) +} + +type int16Int16Map map[int16]int16 + +//PackMap packs TypeMap as msgpack. +func (tm int16Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Int16Map) Len() int { + return len(tm) +} + +type int16Int32Map map[int16]int32 + +//PackMap packs TypeMap as msgpack. +func (tm int16Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Int32Map) Len() int { + return len(tm) +} + +type int16Int64Map map[int16]int64 + +//PackMap packs TypeMap as msgpack. +func (tm int16Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Int64Map) Len() int { + return len(tm) +} + +type int16Uint16Map map[int16]uint16 + +//PackMap packs TypeMap as msgpack. 
+func (tm int16Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Uint16Map) Len() int { + return len(tm) +} + +type int16Uint32Map map[int16]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm int16Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Uint32Map) Len() int { + return len(tm) +} + +type int16Float32Map map[int16]float32 + +//PackMap packs TypeMap as msgpack. +func (tm int16Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Float32Map) Len() int { + return len(tm) +} + +type int16Float64Map map[int16]float64 + +//PackMap packs TypeMap as msgpack. +func (tm int16Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Float64Map) Len() int { + return len(tm) +} + +type int16InterfaceMap map[int16]interface{} + +//PackMap packs TypeMap as msgpack. 
+func (tm int16InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16InterfaceMap) Len() int { + return len(tm) +} + +type int32StringMap map[int32]string + +//PackMap packs TypeMap as msgpack. +func (tm int32StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32StringMap) Len() int { + return len(tm) +} + +type int32IntMap map[int32]int + +//PackMap packs TypeMap as msgpack. +func (tm int32IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32IntMap) Len() int { + return len(tm) +} + +type int32Int8Map map[int32]int8 + +//PackMap packs TypeMap as msgpack. +func (tm int32Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Int8Map) Len() int { + return len(tm) +} + +type int32Int16Map map[int32]int16 + +//PackMap packs TypeMap as msgpack. 
+func (tm int32Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Int16Map) Len() int { + return len(tm) +} + +type int32Int32Map map[int32]int32 + +//PackMap packs TypeMap as msgpack. +func (tm int32Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Int32Map) Len() int { + return len(tm) +} + +type int32Int64Map map[int32]int64 + +//PackMap packs TypeMap as msgpack. +func (tm int32Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Int64Map) Len() int { + return len(tm) +} + +type int32Uint16Map map[int32]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm int32Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Uint16Map) Len() int { + return len(tm) +} + +type int32Uint32Map map[int32]uint32 + +//PackMap packs TypeMap as msgpack. 
+func (tm int32Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Uint32Map) Len() int { + return len(tm) +} + +type int32Float32Map map[int32]float32 + +//PackMap packs TypeMap as msgpack. +func (tm int32Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Float32Map) Len() int { + return len(tm) +} + +type int32Float64Map map[int32]float64 + +//PackMap packs TypeMap as msgpack. +func (tm int32Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Float64Map) Len() int { + return len(tm) +} + +type int32InterfaceMap map[int32]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm int32InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32InterfaceMap) Len() int { + return len(tm) +} + +type int64StringMap map[int64]string + +//PackMap packs TypeMap as msgpack. 
+func (tm int64StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64StringMap) Len() int { + return len(tm) +} + +type int64IntMap map[int64]int + +//PackMap packs TypeMap as msgpack. +func (tm int64IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64IntMap) Len() int { + return len(tm) +} + +type int64Int8Map map[int64]int8 + +//PackMap packs TypeMap as msgpack. +func (tm int64Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Int8Map) Len() int { + return len(tm) +} + +type int64Int16Map map[int64]int16 + +//PackMap packs TypeMap as msgpack. +func (tm int64Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Int16Map) Len() int { + return len(tm) +} + +type int64Int32Map map[int64]int32 + +//PackMap packs TypeMap as msgpack. 
+func (tm int64Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Int32Map) Len() int { + return len(tm) +} + +type int64Int64Map map[int64]int64 + +//PackMap packs TypeMap as msgpack. +func (tm int64Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Int64Map) Len() int { + return len(tm) +} + +type int64Uint16Map map[int64]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm int64Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Uint16Map) Len() int { + return len(tm) +} + +type int64Uint32Map map[int64]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm int64Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Uint32Map) Len() int { + return len(tm) +} + +type int64Float32Map map[int64]float32 + +//PackMap packs TypeMap as msgpack. 
+func (tm int64Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Float32Map) Len() int { + return len(tm) +} + +type int64Float64Map map[int64]float64 + +//PackMap packs TypeMap as msgpack. +func (tm int64Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Float64Map) Len() int { + return len(tm) +} + +type int64InterfaceMap map[int64]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm int64InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64InterfaceMap) Len() int { + return len(tm) +} + +type uint16StringMap map[uint16]string + +//PackMap packs TypeMap as msgpack. +func (tm uint16StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16StringMap) Len() int { + return len(tm) +} + +type uint16IntMap map[uint16]int + +//PackMap packs TypeMap as msgpack. 
+func (tm uint16IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16IntMap) Len() int { + return len(tm) +} + +type uint16Int8Map map[uint16]int8 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Int8Map) Len() int { + return len(tm) +} + +type uint16Int16Map map[uint16]int16 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Int16Map) Len() int { + return len(tm) +} + +type uint16Int32Map map[uint16]int32 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Int32Map) Len() int { + return len(tm) +} + +type uint16Int64Map map[uint16]int64 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint16Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Int64Map) Len() int { + return len(tm) +} + +type uint16Uint16Map map[uint16]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Uint16Map) Len() int { + return len(tm) +} + +type uint16Uint32Map map[uint16]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Uint32Map) Len() int { + return len(tm) +} + +type uint16Float32Map map[uint16]float32 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Float32Map) Len() int { + return len(tm) +} + +type uint16Float64Map map[uint16]float64 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint16Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Float64Map) Len() int { + return len(tm) +} + +type uint16InterfaceMap map[uint16]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm uint16InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16InterfaceMap) Len() int { + return len(tm) +} + +type uint32StringMap map[uint32]string + +//PackMap packs TypeMap as msgpack. +func (tm uint32StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32StringMap) Len() int { + return len(tm) +} + +type uint32IntMap map[uint32]int + +//PackMap packs TypeMap as msgpack. +func (tm uint32IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32IntMap) Len() int { + return len(tm) +} + +type uint32Int8Map map[uint32]int8 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint32Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Int8Map) Len() int { + return len(tm) +} + +type uint32Int16Map map[uint32]int16 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Int16Map) Len() int { + return len(tm) +} + +type uint32Int32Map map[uint32]int32 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Int32Map) Len() int { + return len(tm) +} + +type uint32Int64Map map[uint32]int64 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Int64Map) Len() int { + return len(tm) +} + +type uint32Uint16Map map[uint32]uint16 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint32Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Uint16Map) Len() int { + return len(tm) +} + +type uint32Uint32Map map[uint32]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Uint32Map) Len() int { + return len(tm) +} + +type uint32Float32Map map[uint32]float32 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Float32Map) Len() int { + return len(tm) +} + +type uint32Float64Map map[uint32]float64 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Float64Map) Len() int { + return len(tm) +} + +type uint32InterfaceMap map[uint32]interface{} + +//PackMap packs TypeMap as msgpack. 
+func (tm uint32InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32InterfaceMap) Len() int { + return len(tm) +} + +type float32StringMap map[float32]string + +//PackMap packs TypeMap as msgpack. +func (tm float32StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32StringMap) Len() int { + return len(tm) +} + +type float32IntMap map[float32]int + +//PackMap packs TypeMap as msgpack. +func (tm float32IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32IntMap) Len() int { + return len(tm) +} + +type float32Int8Map map[float32]int8 + +//PackMap packs TypeMap as msgpack. +func (tm float32Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Int8Map) Len() int { + return len(tm) +} + +type float32Int16Map map[float32]int16 + +//PackMap packs TypeMap as msgpack. 
+func (tm float32Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Int16Map) Len() int { + return len(tm) +} + +type float32Int32Map map[float32]int32 + +//PackMap packs TypeMap as msgpack. +func (tm float32Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Int32Map) Len() int { + return len(tm) +} + +type float32Int64Map map[float32]int64 + +//PackMap packs TypeMap as msgpack. +func (tm float32Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Int64Map) Len() int { + return len(tm) +} + +type float32Uint16Map map[float32]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm float32Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Uint16Map) Len() int { + return len(tm) +} + +type float32Uint32Map map[float32]uint32 + +//PackMap packs TypeMap as msgpack. 
+func (tm float32Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Uint32Map) Len() int { + return len(tm) +} + +type float32Float32Map map[float32]float32 + +//PackMap packs TypeMap as msgpack. +func (tm float32Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Float32Map) Len() int { + return len(tm) +} + +type float32Float64Map map[float32]float64 + +//PackMap packs TypeMap as msgpack. +func (tm float32Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Float64Map) Len() int { + return len(tm) +} + +type float32InterfaceMap map[float32]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm float32InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32InterfaceMap) Len() int { + return len(tm) +} + +type float64StringMap map[float64]string + +//PackMap packs TypeMap as msgpack. 
+func (tm float64StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64StringMap) Len() int { + return len(tm) +} + +type float64IntMap map[float64]int + +//PackMap packs TypeMap as msgpack. +func (tm float64IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64IntMap) Len() int { + return len(tm) +} + +type float64Int8Map map[float64]int8 + +//PackMap packs TypeMap as msgpack. +func (tm float64Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Int8Map) Len() int { + return len(tm) +} + +type float64Int16Map map[float64]int16 + +//PackMap packs TypeMap as msgpack. +func (tm float64Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Int16Map) Len() int { + return len(tm) +} + +type float64Int32Map map[float64]int32 + +//PackMap packs TypeMap as msgpack. 
+func (tm float64Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Int32Map) Len() int { + return len(tm) +} + +type float64Int64Map map[float64]int64 + +//PackMap packs TypeMap as msgpack. +func (tm float64Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Int64Map) Len() int { + return len(tm) +} + +type float64Uint16Map map[float64]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm float64Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Uint16Map) Len() int { + return len(tm) +} + +type float64Uint32Map map[float64]uint32 + +//PackMap packs TypeMap as msgpack. +func (tm float64Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Uint32Map) Len() int { + return len(tm) +} + +type float64Float32Map map[float64]float32 + +//PackMap packs TypeMap as msgpack. 
+func (tm float64Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Float32Map) Len() int { + return len(tm) +} + +type float64Float64Map map[float64]float64 + +//PackMap packs TypeMap as msgpack. +func (tm float64Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Float64Map) Len() int { + return len(tm) +} + +type float64InterfaceMap map[float64]interface{} + +//PackMap packs TypeMap as msgpack. +func (tm float64InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64InterfaceMap) Len() int { + return len(tm) +} + +type stringUint64Map map[string]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm stringUint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackString(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm stringUint64Map) Len() int { + return len(tm) +} + +type intUint64Map map[int]uint64 + +//PackMap packs TypeMap as msgpack. 
+func (tm intUint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm intUint64Map) Len() int { + return len(tm) +} + +type int8Uint64Map map[int8]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm int8Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int8Uint64Map) Len() int { + return len(tm) +} + +type int16Uint64Map map[int16]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm int16Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int16Uint64Map) Len() int { + return len(tm) +} + +type int32Uint64Map map[int32]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm int32Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int32Uint64Map) Len() int { + return len(tm) +} + +type int64Uint64Map map[int64]uint64 + +//PackMap packs TypeMap as msgpack. 
+func (tm int64Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm int64Uint64Map) Len() int { + return len(tm) +} + +type uint16Uint64Map map[uint16]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm uint16Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint16Uint64Map) Len() int { + return len(tm) +} + +type uint32Uint64Map map[uint32]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm uint32Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackInt64(buf, int64(k)) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint32Uint64Map) Len() int { + return len(tm) +} + +type float32Uint64Map map[float32]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm float32Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat32(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float32Uint64Map) Len() int { + return len(tm) +} + +type float64Uint64Map map[float64]uint64 + +//PackMap packs TypeMap as msgpack. 
+func (tm float64Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackFloat64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm float64Uint64Map) Len() int { + return len(tm) +} + +type uint64StringMap map[uint64]string + +//PackMap packs TypeMap as msgpack. +func (tm uint64StringMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackString(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64StringMap) Len() int { + return len(tm) +} + +type uint64IntMap map[uint64]int + +//PackMap packs TypeMap as msgpack. +func (tm uint64IntMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64IntMap) Len() int { + return len(tm) +} + +type uint64Int8Map map[uint64]int8 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Int8Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Int8Map) Len() int { + return len(tm) +} + +type uint64Int16Map map[uint64]int16 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint64Int16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Int16Map) Len() int { + return len(tm) +} + +type uint64Int32Map map[uint64]int32 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Int32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Int32Map) Len() int { + return len(tm) +} + +type uint64Int64Map map[uint64]int64 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Int64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Int64Map) Len() int { + return len(tm) +} + +type uint64Uint16Map map[uint64]uint16 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Uint16Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Uint16Map) Len() int { + return len(tm) +} + +type uint64Uint32Map map[uint64]uint32 + +//PackMap packs TypeMap as msgpack. 
+func (tm uint64Uint32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackInt64(buf, int64(v)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Uint32Map) Len() int { + return len(tm) +} + +type uint64Uint64Map map[uint64]uint64 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Uint64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackUInt64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Uint64Map) Len() int { + return len(tm) +} + +type uint64Float32Map map[uint64]float32 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Float32Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat32(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Float32Map) Len() int { + return len(tm) +} + +type uint64Float64Map map[uint64]float64 + +//PackMap packs TypeMap as msgpack. +func (tm uint64Float64Map) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = PackFloat64(buf, v) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64Float64Map) Len() int { + return len(tm) +} + +type uint64InterfaceMap map[uint64]interface{} + +//PackMap packs TypeMap as msgpack. 
+func (tm uint64InterfaceMap) PackMap(buf BufferEx) (int, error) { + size := 0 + for k, v := range tm { + n, err := PackUInt64(buf, k) + size += n + if err != nil { + return size, err + } + + n, err = __PackObject(buf, v, false) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +// Len return the length of TypeSlice +func (tm uint64InterfaceMap) Len() int { + return len(tm) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/host.go b/vendor/github.com/aerospike/aerospike-client-go/host.go new file mode 100644 index 00000000000..dd6e9edab31 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/host.go @@ -0,0 +1,48 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "net" + "strconv" +) + +// Host name/port of database server. +type Host struct { + + // Host name or IP address of database server. + Name string + + //TLSName defines the TLS certificate name used for secure connections. + TLSName string + + // Port of database server. + Port int +} + +// NewHost initializes new host instance. 
+func NewHost(name string, port int) *Host { + return &Host{Name: name, Port: port} +} + +// Implements stringer interface +func (h *Host) String() string { + return net.JoinHostPort(h.Name, strconv.Itoa(h.Port)) +} + +// Implements stringer interface +func (h *Host) equals(other *Host) bool { + return h.Name == other.Name && h.Port == other.Port +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/index_collection_type.go b/vendor/github.com/aerospike/aerospike-client-go/index_collection_type.go new file mode 100644 index 00000000000..33171338c0a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/index_collection_type.go @@ -0,0 +1,53 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "fmt" + +// IndexCollectionType is the secondary index collection type. +type IndexCollectionType int + +const ( + + // Normal scalar index. + ICT_DEFAULT IndexCollectionType = iota + + // LIST is Index list elements. + ICT_LIST + + // MAPKEYS is Index map keys. + ICT_MAPKEYS + + // MAPVALUES is Index map values. 
+ ICT_MAPVALUES +) + +// ictToString converts IndexCollectionType to string representations +func ictToString(ict IndexCollectionType) string { + switch ict { + + case ICT_LIST: + return "LIST" + + case ICT_MAPKEYS: + return "MAPKEYS" + + case ICT_MAPVALUES: + return "MAPVALUES" + + default: + panic(fmt.Sprintf("Unknown IndexCollectionType value %v", ict)) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/index_type.go b/vendor/github.com/aerospike/aerospike-client-go/index_type.go new file mode 100644 index 00000000000..6af3cb647c0 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/index_type.go @@ -0,0 +1,29 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// IndexType the type of the secondary index. +type IndexType string + +const ( + // NUMERIC specifies an index on numeric values. + NUMERIC IndexType = "NUMERIC" + + // STRING specifies an index on string values. + STRING IndexType = "STRING" + + // 2-dimensional spherical geospatial index. + GEO2DSPHERE IndexType = "GEO2DSPHERE" +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/info.go b/vendor/github.com/aerospike/aerospike-client-go/info.go new file mode 100644 index 00000000000..e8aba2841f7 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/info.go @@ -0,0 +1,149 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "encoding/binary" + "strings" + "time" + + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" +) + +const ( + _DEFAULT_TIMEOUT = 2 * time.Second + _NO_TIMEOUT = 365 * 24 * time.Hour +) + +// Access server's info monitoring protocol. +type info struct { + msg *Message +} + +// RequestNodeInfo gets info values by name from the specified database server node. +func RequestNodeInfo(node *Node, name ...string) (map[string]string, error) { + conn, err := node.GetConnection(_DEFAULT_TIMEOUT) + if err != nil { + return nil, err + } + + response, err := RequestInfo(conn, name...) + if err != nil { + conn.Close() + return nil, err + } + node.PutConnection(conn) + return response, nil +} + +// RequestNodeStats returns statistics for the specified node as a map +func RequestNodeStats(node *Node) (map[string]string, error) { + infoMap, err := RequestNodeInfo(node, "statistics") + if err != nil { + return nil, err + } + + res := map[string]string{} + + v, exists := infoMap["statistics"] + if !exists { + return res, nil + } + + values := strings.Split(v, ";") + for i := range values { + kv := strings.Split(values[i], "=") + if len(kv) > 1 { + res[kv[0]] = kv[1] + } + } + + return res, nil +} + +// Send multiple commands to server and store results. 
+func newInfo(conn *Connection, commands ...string) (*info, error) { + commandStr := strings.Trim(strings.Join(commands, "\n"), " ") + if strings.Trim(commandStr, " ") != "" { + commandStr += "\n" + } + newInfo := &info{ + msg: NewMessage(MSG_INFO, []byte(commandStr)), + } + + if err := newInfo.sendCommand(conn); err != nil { + return nil, err + } + return newInfo, nil +} + +// RequestInfo gets info values by name from the specified connection. +func RequestInfo(conn *Connection, names ...string) (map[string]string, error) { + info, err := newInfo(conn, names...) + if err != nil { + return nil, err + } + return info.parseMultiResponse() +} + +// Issue request and set results buffer. This method is used internally. +// The static request methods should be used instead. +func (nfo *info) sendCommand(conn *Connection) error { + // Write. + if _, err := conn.Write(nfo.msg.Serialize()); err != nil { + Logger.Debug("Failed to send command.") + return err + } + + // Read - reuse input buffer. + header := bytes.NewBuffer(make([]byte, MSG_HEADER_SIZE)) + if _, err := conn.Read(header.Bytes(), MSG_HEADER_SIZE); err != nil { + return err + } + if err := binary.Read(header, binary.BigEndian, &nfo.msg.MessageHeader); err != nil { + Logger.Debug("Failed to read command response.") + return err + } + + // Logger.Debug("Header Response: %v %v %v %v", t.Type, t.Version, t.Length(), t.DataLen) + if err := nfo.msg.Resize(nfo.msg.Length()); err != nil { + return err + } + _, err := conn.Read(nfo.msg.Data, len(nfo.msg.Data)) + return err +} + +func (nfo *info) parseMultiResponse() (map[string]string, error) { + responses := make(map[string]string) + data := strings.Trim(string(nfo.msg.Data), "\n") + + keyValuesArr := strings.Split(data, "\n") + for _, keyValueStr := range keyValuesArr { + KeyValArr := strings.Split(keyValueStr, "\t") + + switch len(KeyValArr) { + case 1: + responses[KeyValArr[0]] = "" + case 2: + responses[KeyValArr[0]] = KeyValArr[1] + default: + 
Logger.Error("Requested info buffer does not adhere to the protocol: %s", data) + } + } + + return responses, nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/instance.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/instance.go new file mode 100644 index 00000000000..b0a62970877 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/instance.go @@ -0,0 +1,71 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lua + +import ( + "github.com/aerospike/aerospike-client-go/internal/lua/resources" + "github.com/aerospike/aerospike-client-go/logger" + "github.com/aerospike/aerospike-client-go/types" + "github.com/yuin/gopher-lua" +) + +// SetPath sets the interpreter's current Lua Path +func SetPath(lpath string) { + lua.LuaPath = lpath +} + +// LuaPath returns the interpreter's current Lua Path +func LuaPath() string { + return lua.LuaPath +} + +type LuaInstance struct { + instance *lua.LState +} + +// Global LState pool +var LuaPool = types.NewPool(64) + +func newInstance(params ...interface{}) interface{} { + L := lua.NewState() + + registerLuaAerospikeType(L) + registerLuaStreamType(L) + registerLuaListType(L) + registerLuaMapType(L) + + if err := L.DoString(luaLib.LibStreamOps); err != nil { + logger.Logger.Error(err.Error()) + return nil + } + + if err := L.DoString(luaLib.LibAerospike); err != nil { + logger.Logger.Error(err.Error()) + return nil + } + + return L +} + +func finalizeInstance(instance interface{}) { + if instance != nil { + instance.(*lua.LState).Close() + } +} + +func init() { + LuaPool.New = newInstance + LuaPool.Finalize = finalizeInstance +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua.go new file mode 100644 index 00000000000..5914ed266ca --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua.go @@ -0,0 +1,155 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package lua + +import ( + "fmt" + "reflect" + + "github.com/yuin/gopher-lua" +) + +// NewValue creates a value from interface{} in the interpreter +func NewValue(L *lua.LState, value interface{}) lua.LValue { + // Nils should return immediately + if value == nil { + return lua.LNil + } + + // if it is a LValue already, return it without delay + if lval, ok := value.(lua.LValue); ok { + return lval + } + + switch v := value.(type) { + case string: + return lua.LString(v) + case int: + return lua.LNumber(float64(v)) + case uint: + return lua.LNumber(float64(v)) + case int8: + return lua.LNumber(float64(v)) + case uint8: + return lua.LNumber(float64(v)) + case int16: + return lua.LNumber(float64(v)) + case uint16: + return lua.LNumber(float64(v)) + case int32: + return lua.LNumber(float64(v)) + case uint32: + return lua.LNumber(float64(v)) + case int64: + return lua.LNumber(float64(v)) + case uint64: + return lua.LNumber(float64(v)) + case float32: + return lua.LNumber(float64(v)) + case float64: + return lua.LNumber(v) + case bool: + return lua.LBool(v) + case map[interface{}]interface{}: + luaMap := &LuaMap{m: v} + ud := L.NewUserData() + ud.Value = luaMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + return ud + + case []interface{}: + luaList := &LuaList{l: v} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + return ud + } + + // check for array and map + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Array, reflect.Slice: + l := rv.Len() + arr := make([]interface{}, l) + for i := 0; i < l; i++ { + arr[i] = rv.Index(i).Interface() + } + + return NewValue(L, arr) + case reflect.Map: + l := rv.Len() + amap := make(map[interface{}]interface{}, l) + for _, i := range rv.MapKeys() { + amap[i.Interface()] = rv.MapIndex(i).Interface() + } + + return NewValue(L, amap) + 
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return NewValue(L, reflect.ValueOf(value).Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return NewValue(L, int64(reflect.ValueOf(value).Uint())) + case reflect.String: + return NewValue(L, rv.String()) + case reflect.Float32, reflect.Float64: + return NewValue(L, rv.Float()) + case reflect.Bool: + return NewValue(L, rv.Bool()) + } + + panic(fmt.Sprintf("unrecognized data type for lua: %#v\n", value)) +} + +// LValueToInterface converts a generic LValue to a native type +func LValueToInterface(val lua.LValue) interface{} { + switch val.Type() { + case lua.LTNil: + return nil + case lua.LTBool: + return lua.LVAsBool(val) + case lua.LTNumber: + return float64(lua.LVAsNumber(val)) + case lua.LTString: + return lua.LVAsString(val) + case lua.LTUserData: + ud := val.(*lua.LUserData).Value + switch v := ud.(type) { + case *LuaMap: + return v.m + case *LuaList: + return v.l + default: + return v + } + + case lua.LTTable: + t := val.(*lua.LTable) + m := make(map[interface{}]interface{}, t.Len()) + t.ForEach(func(k, v lua.LValue) { m[k] = v }) + return m + default: + panic(fmt.Sprintf("unrecognized data type %#v", val)) + } +} + +func allToString(L *lua.LState) int { + ud := L.CheckUserData(1) + value := ud.Value + if stringer, ok := value.(fmt.Stringer); ok { + L.Push(lua.LString(stringer.String())) + } else { + L.Push(lua.LString(fmt.Sprintf("%v", value))) + } + return 1 +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_aerospike.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_aerospike.go new file mode 100644 index 00000000000..6859e1669c4 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_aerospike.go @@ -0,0 +1,65 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lua + +import ( + "github.com/aerospike/aerospike-client-go/logger" + "github.com/yuin/gopher-lua" +) + +type LuaAerospike struct { + s chan interface{} +} + +const luaLuaAerospikeTypeName = "LuaAerospike" + +// Registers my luaAerospike type to given L. +func registerLuaAerospikeType(L *lua.LState) { + mt := L.NewTypeMetatable(luaLuaAerospikeTypeName) + + L.SetGlobal("aerospike", mt) + + // static attributes + L.SetField(mt, "log", L.NewFunction(luaAerospikeLog)) + + L.SetMetatable(mt, mt) +} + +func luaAerospikeLog(L *lua.LState) int { + if L.GetTop() < 2 || L.GetTop() > 3 { + L.ArgError(1, "2 arguments are expected for aerospike:log method") + return 0 + } + + // account for calling it on a table + paramIdx := 1 + if L.GetTop() == 3 { + paramIdx = 2 + } + + level := L.CheckInt(paramIdx) + str := L.CheckString(paramIdx + 1) + + switch level { + case 1: + logger.Logger.Warn(str) + case 2: + logger.Logger.Info(str) + case 3, 4: + logger.Logger.Debug(str) + } + + return 0 +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_list.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_list.go new file mode 100644 index 00000000000..775c3a9cb89 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_list.go @@ -0,0 +1,400 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lua + +import ( + "fmt" + + "github.com/yuin/gopher-lua" +) + +type LuaList struct { + l []interface{} +} + +const luaLuaListTypeName = "LuaList" + +// Registers my luaList type to given L. +func registerLuaListType(L *lua.LState) { + mt := L.NewTypeMetatable(luaLuaListTypeName) + + // List package + L.SetGlobal("List", mt) + + // static attributes + + L.SetMetatable(mt, mt) + + // list package + mt = L.NewTypeMetatable(luaLuaListTypeName) + L.SetGlobal("list", mt) + + // static attributes + L.SetField(mt, "__call", L.NewFunction(newLuaList)) + L.SetField(mt, "create", L.NewFunction(createLuaList)) + + L.SetField(mt, "size", L.NewFunction(luaListSize)) + L.SetField(mt, "insert", L.NewFunction(luaListInsert)) + L.SetField(mt, "append", L.NewFunction(luaListAppend)) + L.SetField(mt, "prepend", L.NewFunction(luaListPrepend)) + L.SetField(mt, "take", L.NewFunction(luaListTake)) + L.SetField(mt, "remove", L.NewFunction(luaListRemove)) + L.SetField(mt, "drop", L.NewFunction(luaListDrop)) + L.SetField(mt, "trim", L.NewFunction(luaListTrim)) + L.SetField(mt, "clone", L.NewFunction(luaListClone)) + L.SetField(mt, "concat", L.NewFunction(luaListConcat)) + L.SetField(mt, "merge", L.NewFunction(luaListMerge)) + L.SetField(mt, "iterator", L.NewFunction(luaListIterator)) + + // methods + L.SetFuncs(mt, map[string]lua.LGFunction{ + "__index": luaListIndex, + "__newindex": luaListNewIndex, + "__len": luaListLen, + 
"__tostring": luaListToString, + }) + + L.SetMetatable(mt, mt) +} + +// Constructor +func createLuaList(L *lua.LState) int { + if L.GetTop() == 0 { + luaList := &LuaList{l: []interface{}{}} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 + } else if L.GetTop() == 1 || L.GetTop() == 2 { + cp := L.CheckInt(1) + l := make([]interface{}, 0, cp) + + luaList := &LuaList{l: l} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 + } + L.ArgError(1, "Only one argument expected for list#create method") + return 0 +} + +// Constructor +func newLuaList(L *lua.LState) int { + if L.GetTop() == 1 { + luaList := &LuaList{l: []interface{}{}} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 + } else if L.GetTop() == 2 { + t := L.CheckTable(2) + l := make([]interface{}, t.Len()) + for i := 1; i <= t.Len(); i++ { + l[i-1] = LValueToInterface(t.RawGetInt(i)) + } + + luaList := &LuaList{l: l} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 + } + L.ArgError(1, "Only one argument expected for list#create method") + return 0 +} + +// Checks whether the first lua argument is a *LUserData with *LuaList and returns this *LuaList. 
+func checkLuaList(L *lua.LState, arg int) *LuaList { + ud := L.CheckUserData(arg) + if v, ok := ud.Value.(*LuaList); ok { + return v + } + L.ArgError(1, "luaList expected") + return nil +} + +func luaListRemove(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for remove method") + return 0 + } + index := L.CheckInt(2) - 1 + + if index < 0 || index >= len(p.l) { + L.ArgError(1, "index out of range for list#remove") + return 0 + } + + for i := index; i < len(p.l)-1; i++ { + p.l[i] = p.l[i+1] + } + p.l = p.l[:len(p.l)-1] + + return 0 +} + +func luaListInsert(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 3 { + L.ArgError(1, "Only two arguments expected for insert method") + return 0 + } + index := L.CheckInt(2) + value := LValueToInterface(L.CheckAny(3)) + + if cap(p.l) > len(p.l) { + for i := len(p.l); i >= index; i-- { + p.l[i] = p.l[i-1] + } + p.l[index-1] = value + } else { + ln := len(p.l) * 2 + if ln > 256 { + ln = 256 + } + newList := make([]interface{}, len(p.l)+1, ln) + + copy(newList, p.l[:index-1]) + newList[index-1] = value + copy(newList[index:], p.l[index-1:len(p.l)]) + p.l = newList + } + + return 0 +} + +func luaListAppend(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for append method") + return 0 + } + value := LValueToInterface(L.CheckAny(2)) + p.l = append(p.l, value) + + return 0 +} + +func luaListPrepend(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for append method") + return 0 + } + value := LValueToInterface(L.CheckAny(2)) + + if cap(p.l) > len(p.l) { + p.l = append(p.l, nil) + for i := len(p.l) - 1; i > 0; i-- { + p.l[i] = p.l[i-1] + } + p.l[0] = value + } else { + ln := len(p.l) * 2 + if ln > 256 { + ln = 256 + } + newList := make([]interface{}, len(p.l)+1, ln) + + copy(newList[1:], p.l) + newList[0] = value + p.l = newList 
+ } + + return 0 +} + +func luaListTake(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for take method") + return 0 + } + + count := L.CheckInt(2) + items := p.l + if count <= len(p.l) { + items = p.l[:count] + } + + luaList := &LuaList{l: items} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 +} + +func luaListDrop(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for take method") + return 0 + } + + count := L.CheckInt(2) + var items []interface{} + if count < len(p.l) { + items = p.l[count:] + } else { + items = []interface{}{} + } + + luaList := &LuaList{l: items} + ud := L.NewUserData() + ud.Value = luaList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 +} + +func luaListTrim(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for list#trim method") + return 0 + } + + count := L.CheckInt(2) + p.l = p.l[:count-1] + + return 0 +} + +func luaListConcat(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for list#concat method") + return 0 + } + + sp := checkLuaList(L, 2) + p.l = append(p.l, sp.l...) 
+ return 0 +} + +// LuaList#clone() +func luaListClone(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "only one argument expected for list#clone method") + return 0 + } + + newList := &LuaList{l: make([]interface{}, len(p.l))} + copy(newList.l, p.l) + + ud := L.NewUserData() + ud.Value = newList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + return 1 +} + +// LuaList#merge() +func luaListMerge(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for merge method") + return 0 + } + + sp := checkLuaList(L, 2) + + newList := &LuaList{l: make([]interface{}, 0, len(p.l)+len(sp.l))} + newList.l = append(newList.l, p.l...) + newList.l = append(newList.l, sp.l...) + + ud := L.NewUserData() + ud.Value = newList + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaListTypeName)) + L.Push(ud) + + return 1 +} + +func luaListToString(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for tostring method") + return 0 + } + + L.Push(lua.LString(fmt.Sprintf("%v", p.l))) + return 1 +} + +func luaListSize(L *lua.LState) int { + p := checkLuaList(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for __size method") + return 0 + } + L.Push(lua.LNumber(len(p.l))) + return 1 +} + +func luaListIndex(L *lua.LState) int { + ref := checkLuaList(L, 1) + index := L.CheckInt(2) + + if index <= 0 || index > len(ref.l) { + L.Push(lua.LNil) + return 1 + } + + item := ref.l[index-1] + L.Push(NewValue(L, item)) + return 1 +} + +func luaListNewIndex(L *lua.LState) int { + ref := checkLuaList(L, 1) + index := L.CheckInt(2) + value := L.CheckAny(3) + + ref.l[index-1] = LValueToInterface(value) + return 0 +} + +func luaListLen(L *lua.LState) int { + ref := checkLuaList(L, 1) + L.Push(lua.LNumber(len(ref.l))) + return 1 +} + +func luaListIterator(L *lua.LState) int { + ref := checkLuaList(L, 1) + + // make an 
iterator + idx := 0 + llen := len(ref.l) + fn := func(L *lua.LState) int { + if idx < llen { + L.Push(NewValue(L, ref.l[idx])) + idx++ + return 1 + } + return 0 + } + L.Push(L.NewFunction(fn)) + return 1 +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_map.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_map.go new file mode 100644 index 00000000000..452812c62f7 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_map.go @@ -0,0 +1,389 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lua + +import ( + "fmt" + + "github.com/yuin/gopher-lua" +) + +type LuaMap struct { + m map[interface{}]interface{} +} + +const luaLuaMapTypeName = "LuaMap" + +func registerLuaMapType(L *lua.LState) { + // Map package + mt := L.NewTypeMetatable(luaLuaMapTypeName) + + L.SetGlobal("Map", mt) + + // static attributes + L.SetField(mt, "__call", L.NewFunction(newLuaMap)) + + L.SetField(mt, "create", L.NewFunction(luaMapCreate)) + + // methods + L.SetMetatable(mt, mt) + + // map package + mt = L.NewTypeMetatable(luaLuaMapTypeName) + + L.SetGlobal("map", mt) + + // static attributes + L.SetField(mt, "__call", L.NewFunction(newLuaMap)) + + L.SetField(mt, "create", L.NewFunction(luaMapCreate)) + + L.SetField(mt, "pairs", L.NewFunction(luaMapPairs)) + L.SetField(mt, "size", L.NewFunction(luaMapSize)) + L.SetField(mt, "keys", L.NewFunction(luaMapKeys)) + L.SetField(mt, "values", L.NewFunction(luaMapValues)) + L.SetField(mt, "remove", L.NewFunction(luaMapRemove)) + L.SetField(mt, "clone", L.NewFunction(luaMapClone)) + L.SetField(mt, "merge", L.NewFunction(luaMapMerge)) + L.SetField(mt, "diff", L.NewFunction(luaMapDiff)) + + // methods + L.SetFuncs(mt, map[string]lua.LGFunction{ + "__index": luaMapIndex, + "__newindex": luaMapNewIndex, + "__len": luaMapSize, + "__tostring": luaMapToString, + }) + + L.SetMetatable(mt, mt) +} + +// Constructor +func luaMapCreate(L *lua.LState) int { + if L.GetTop() == 1 { + luaMap := &LuaMap{m: map[interface{}]interface{}{}} + ud := L.NewUserData() + ud.Value = luaMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + return 1 + } else if L.GetTop() == 2 { + L.CheckTable(1) + sz := L.CheckInt(2) + luaMap := &LuaMap{m: make(map[interface{}]interface{}, sz)} + ud := L.NewUserData() + ud.Value = luaMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + return 1 + } + L.ArgError(1, "Only one argument expected for map create method") + return 0 +} + +func newLuaMap(L *lua.LState) int { + if 
L.GetTop() == 1 { + luaMap := &LuaMap{m: make(map[interface{}]interface{}, 4)} + ud := L.NewUserData() + ud.Value = luaMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + return 1 + } else if L.GetTop() == 2 { + L.CheckTable(1) + t := L.CheckTable(2) + m := make(map[interface{}]interface{}, t.Len()) + t.ForEach(func(k, v lua.LValue) { m[LValueToInterface(k)] = LValueToInterface(v) }) + + luaMap := &LuaMap{m: m} + ud := L.NewUserData() + ud.Value = luaMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + return 1 + } + L.ArgError(1, "Only one argument expected for map create method") + return 0 +} + +// Checks whether the first lua argument is a *LUserData with *LuaMap and returns this *LuaMap. +func checkLuaMap(L *lua.LState, arg int) *LuaMap { + ud := L.CheckUserData(arg) + if v, ok := ud.Value.(*LuaMap); ok { + return v + } + L.ArgError(1, "luaMap expected") + return nil +} + +func luaMapRemove(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for remove method") + return 0 + } + key := L.CheckAny(2) + + delete(p.m, LValueToInterface(key)) + return 0 +} + +func luaMapClone(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for clone method") + return 0 + } + + newMap := &LuaMap{m: make(map[interface{}]interface{}, len(p.m))} + for k, v := range p.m { + newMap.m[k] = v + } + + ud := L.NewUserData() + ud.Value = newMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + return 1 +} + +func luaMapMerge(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() < 2 || L.GetTop() > 3 { + L.ArgError(1, "Only 2 or 3 argument expected for merge method") + return 0 + } + + if L.GetTop() == 2 { + sp := checkLuaMap(L, 2) + + newMap := &LuaMap{m: make(map[interface{}]interface{}, len(p.m)+len(sp.m))} + for k, v := range p.m { + newMap.m[k] = v + } + + for k, v := range sp.m { + 
newMap.m[k] = v + } + + ud := L.NewUserData() + ud.Value = newMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + } else { + sp := checkLuaMap(L, 2) + fn := L.CheckFunction(3) + + newMap := &LuaMap{m: make(map[interface{}]interface{}, len(p.m)+len(sp.m))} + for k, v := range p.m { + if v2, exists := sp.m[k]; exists { + L.CallByParam(lua.P{Fn: fn, NRet: 1, Protect: true, Handler: nil}, NewValue(L, v), NewValue(L, v2)) + ret := L.CheckAny(-1) + L.Pop(1) // remove received value + newMap.m[k] = LValueToInterface(ret) + } else { + newMap.m[k] = v + } + } + + for k, v := range sp.m { + // only add keys that haven't been processed already + if _, exists := newMap.m[k]; !exists { + newMap.m[k] = v + } + } + + ud := L.NewUserData() + ud.Value = newMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + } + + return 1 +} + +func luaMapDiff(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for diff method") + return 0 + } + + sp := checkLuaMap(L, 2) + + newMap := &LuaMap{m: make(map[interface{}]interface{}, len(p.m)+len(sp.m))} + + for k, v := range p.m { + if _, exists := sp.m[k]; !exists { + newMap.m[k] = v + } + } + + for k, v := range sp.m { + if _, exists := p.m[k]; !exists { + newMap.m[k] = v + } + } + + ud := L.NewUserData() + ud.Value = newMap + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaMapTypeName)) + L.Push(ud) + + return 1 +} + +func luaMapToString(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for tostring method") + return 0 + } + L.Push(lua.LString(fmt.Sprintf("%v", p.m))) + return 1 +} + +func luaMapSize(L *lua.LState) int { + p := checkLuaMap(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for __size method") + return 0 + } + L.Push(lua.LNumber(len(p.m))) + return 1 +} + +func luaMapIndex(L *lua.LState) int { + ref := checkMap(L, 1) + key := 
LValueToInterface(L.CheckAny(2)) + + v := ref.m[key] + if v == nil { + v = lua.LNil + } + + L.Push(NewValue(L, v)) + return 1 +} + +func luaMapNewIndex(L *lua.LState) int { + ref := checkMap(L, 1) + key := LValueToInterface(L.CheckAny(2)) + value := LValueToInterface(L.CheckAny(3)) + + ref.m[key] = value + return 0 +} + +func luaMapLen(L *lua.LState) int { + ref := checkMap(L, 1) + L.Push(lua.LNumber(len(ref.m))) + return 1 +} + +func luaMapPairs(L *lua.LState) int { + ref := checkMap(L, 1) + + // make an iterator + iter := make(chan *struct{ k, v interface{} }) + + go func() { + for k, v := range ref.m { + iter <- &struct{ k, v interface{} }{k, v} + } + close(iter) + }() + + fn := func(L *lua.LState) int { + tuple := <-iter + if tuple == nil { + return 0 + } + + L.Push(NewValue(L, tuple.k)) + L.Push(NewValue(L, tuple.v)) + return 2 + } + L.Push(L.NewFunction(fn)) + return 1 +} + +func luaMapKeys(L *lua.LState) int { + ref := checkMap(L, 1) + + // make an iterator + iter := make(chan interface{}) + + go func() { + for k := range ref.m { + iter <- k + } + close(iter) + }() + + fn := func(L *lua.LState) int { + tuple := <-iter + if tuple == nil { + return 0 + } + + L.Push(NewValue(L, tuple)) + return 1 + } + L.Push(L.NewFunction(fn)) + return 1 +} + +func luaMapValues(L *lua.LState) int { + ref := checkMap(L, 1) + + // make an iterator + iter := make(chan interface{}) + + go func() { + for _, v := range ref.m { + iter <- v + } + close(iter) + }() + + fn := func(L *lua.LState) int { + tuple := <-iter + if tuple == nil { + return 0 + } + + L.Push(NewValue(L, tuple)) + return 1 + } + L.Push(L.NewFunction(fn)) + return 1 +} + +func luaMapEq(L *lua.LState) int { + map1 := checkMap(L, 1) + map2 := checkMap(L, 2) + L.Push(lua.LBool(map1 == map2)) + return 1 +} + +func checkMap(L *lua.LState, idx int) *LuaMap { + ud := L.CheckUserData(1) + if v, ok := ud.Value.(*LuaMap); ok { + return v + } + L.ArgError(1, "luaMap expected") + return nil +} diff --git 
a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_stream.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_stream.go new file mode 100644 index 00000000000..af854b2b24a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/lua_stream.go @@ -0,0 +1,128 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lua + +import ( + "fmt" + + "github.com/yuin/gopher-lua" +) + +type LuaStream struct { + s chan interface{} +} + +const luaLuaStreamTypeName = "LuaStream" + +// Registers my luaStream type to given L. 
+func registerLuaStreamType(L *lua.LState) { + mt := L.NewTypeMetatable(luaLuaStreamTypeName) + + L.SetGlobal("stream", mt) + + // static attributes + L.SetField(mt, "__call", L.NewFunction(newLuaStream)) + L.SetField(mt, "read", L.NewFunction(luaStreamRead)) + L.SetField(mt, "write", L.NewFunction(luaStreamWrite)) + L.SetField(mt, "readable", L.NewFunction(luaStreamReadable)) + L.SetField(mt, "writeable", L.NewFunction(luaStreamWriteable)) + + // methods + L.SetFuncs(mt, map[string]lua.LGFunction{ + "__tostring": luaStreamToString, + }) + + L.SetMetatable(mt, mt) +} + +// NewLuaStream creates a LuaStream +func NewLuaStream(L *lua.LState, stream chan interface{}) *lua.LUserData { + luaStream := &LuaStream{s: stream} + ud := L.NewUserData() + ud.Value = luaStream + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaStreamTypeName)) + return ud +} + +func newLuaStream(L *lua.LState) int { + luaStream := &LuaStream{s: make(chan interface{}, 64)} + ud := L.NewUserData() + ud.Value = luaStream + L.SetMetatable(ud, L.GetTypeMetatable(luaLuaStreamTypeName)) + L.Push(ud) + return 1 +} + +// Checks whether the first lua argument is a *LUserData with *LuaStream and returns this *LuaStream. 
+func checkLuaStream(L *lua.LState, arg int) *LuaStream { + ud := L.CheckUserData(arg) + if v, ok := ud.Value.(*LuaStream); ok { + return v + } + L.ArgError(1, "luaSteam expected") + return nil +} + +func luaStreamToString(L *lua.LState) int { + p := checkLuaStream(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for tostring method") + return 0 + } + L.Push(lua.LString(fmt.Sprintf("%v", p.s))) + return 1 +} + +func luaStreamRead(L *lua.LState) int { + p := checkLuaStream(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for stream:read method") + return 0 + } + + L.Push(NewValue(L, <-p.s)) + return 1 +} + +func luaStreamWrite(L *lua.LState) int { + p := checkLuaStream(L, 1) + if L.GetTop() != 2 { + L.ArgError(1, "Only one argument expected for stream:write method") + return 0 + } + + p.s <- LValueToInterface(L.CheckAny(2)) + return 1 +} + +func luaStreamReadable(L *lua.LState) int { + checkLuaStream(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for readable method") + return 0 + } + L.Push(lua.LBool(true)) + return 1 +} + +func luaStreamWriteable(L *lua.LState) int { + checkLuaStream(L, 1) + if L.GetTop() != 1 { + L.ArgError(1, "No arguments expected for writeable method") + return 0 + } + L.Push(lua.LBool(true)) + return 1 +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/aerospike.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/aerospike.go new file mode 100644 index 00000000000..8efdd4a4884 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/aerospike.go @@ -0,0 +1,201 @@ +package luaLib + +const LibAerospike = ` + +-- The Lua Interface to Aerospike +-- +-- ====================================================================== +-- Copyright [2014] Aerospike, Inc.. Portions may be licensed +-- to Aerospike, Inc. under one or more contributor license agreements. 
+-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- ====================================================================== + +-- A table to track whether we had sandboxed a function +sandboxed = {} + +ldebug = debug; + +-- ############################################################################ +-- +-- LOG FUNCTIONS +-- +-- ############################################################################ + +function trace(m, ...) + return aerospike:log(4, string.format(m, ...)) +end + +function debug(m, ...) + return aerospike:log(3, string.format(m, ...)) +end + +function info(m, ...) + return aerospike:log(2, string.format(m, ...)) +end + +function warn(m, ...) 
+ return aerospike:log(1, string.format(m, ...)) +end + +-- ############################################################################ +-- +-- APPLY FUNCTIONS +-- +-- ############################################################################ + +-- +-- Creates a new environment for use in apply_record functions +-- +function env_record() + return { + + -- aerospike types + ["record"] = record, + ["iterator"] = iterator, + ["list"] = list, + ["map"] = map, + ["bytes"] = bytes, + ["aerospike"] = aerospike, + + ["putX"] = putX, + + -- logging functions + ["trace"] = trace, + ["debug"] = debug, + ["info"] = info, + ["warn"] = warn, + + -- standard lua functions + ["collectgarbage"] = collectgarbage, + ["error"] = error, + ["getmetatable"] = getmetatable, + ["ipairs"] = ipairs, + ["load"] = loadstring, + ["module"] = module, + ["next"] = next, + ["pairs"] = pairs, + ["print"] = print, + ["pcall"] = pcall, + ["rawequal"] = rawequal, + ["rawget"] = rawget, + ["rawset"] = rawset, + ["require"] = require, + ["require"] = require, + ["select"] = select, + ["setmetatable"] = setmetatable, + ["setfenv"] = setfenv, + ["tonumber"] = tonumber, + ["tostring"] = tostring, + ["type"] = type, + ["unpack"] = unpack, + ["xpcall"] = xpcall, + + -- standard lua objects + ["math"] = math, + ["io"] = io, + ["os"] = { + ['clock'] = os.clock, + ['date'] = os.date, + ['difftime'] = os.difftime, + ['getenv'] = os.getenv, + ['setlocale'] = os.setlocale, + ['time'] = os.time, + ['tmpname'] = os.tmpname + }, + ["package"] = package, + ["string"] = string, + ["table"] = table, + + -- standard lua variables + ["_G"] = {} + } +end + +-- +-- Apply function to a record and arguments. +-- +-- @param f the fully-qualified name of the function. +-- @param r the record to be applied to the function. +-- @param ... additional arguments to be applied to the function. +-- @return result of the called function or nil. +-- +function apply_record(f, r, ...) 
+ + if f == nil then + error("function not found", 2) + end + + if not sandboxed[f] then + setfenv(f,env_record()) + sandboxed[f] = true + end + + success, result = pcall(f, r, ...) + if success then + return result + else + error(result, 2) + return nil + end +end + +-- +-- Apply function to an iterator and arguments. +-- +-- @param f the fully-qualified name of the function. +-- @param s the iterator to be applied to the function. +-- @param ... additional arguments to be applied to the function. +-- @return 0 on success, otherwise failure. +-- +function apply_stream(f, scope, istream, ostream, ...) + + if f == nil then + error("function not found", 2) + return 2 + end + + --require("stream_ops") + + if not sandboxed[f] then + setfenv(f,env_record()) + sandboxed[f] = true + end + + local stream_ops = StreamOps_create(); + success, result = pcall(f, stream_ops, ...) + + -- info("apply_stream: success=%s, result=%s", tostring(success), tostring(result)) + + if success then + local ops = StreamOps_select(result.ops, scope); + + -- Apply server operations to the stream + -- result => a stream_ops object + local values = StreamOps_apply(stream_iterator(istream), ops); + + -- Iterate the stream of values from the computation + -- then pipe it to the ostream + for value in values do + -- info("value = %s", tostring(value)) + stream.write(ostream, value) + end + + -- 0 is success + return 0 + else + error(result, 2) + return 2 + end +end +` diff --git a/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/stream_ops.go b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/stream_ops.go new file mode 100644 index 00000000000..174b61431ef --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/internal/lua/resources/stream_ops.go @@ -0,0 +1,358 @@ +package luaLib + +const LibStreamOps = ` +-- Lua Interface for Aerospike Record Stream Support +-- +-- ====================================================================== +-- 
Copyright [2014] Aerospike, Inc. Portions may be licensed
+-- to Aerospike, Inc. under one or more contributor license agreements.
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+-- ======================================================================
+
+local function check_limit(v)
+    return type(v) == 'number' and v >= 1000
+end
+
+--
+-- clone a table. creates a shallow copy of the table.
+--
+local function clone_table(t)
+    local out = {}
+    for k,v in pairs(t) do
+        out[k] = v
+    end
+    return out
+end
+
+--
+-- Clone a value.
+-- +local function clone(v) + + local t = type(v) + + if t == 'number' then + return v + elseif t == 'string' then + return v + elseif t == 'boolean' then + return v + elseif t == 'table' then + return clone_table(v) + elseif t == 'userdata' then + if v.__index == Map then + return map.clone(v) + elseif v.__index == List then + return list.clone(v) + end + return nil + end + + return v +end + +-- +-- Filter values +-- @param next - a generator that produces the next value from a stream +-- @param f - the function to transform each value +-- +function filter( next, p ) + -- done indicates if we exhausted the 'next' stream + local done = false + + -- return a closure which the caller can use to get the next value + return function() + + -- we bail if we already exhausted the stream + if done then return nil end + + -- find the first value which satisfies the predicate + for a in next do + if p(a) then + return a + end + end + + done = true + + return nil + end +end + +-- +-- Transform values +-- @param next - a generator that produces the next value from a stream +-- @param f - the tranfomation operation +-- +function transform( next, f ) + -- done indicates if we exhausted the 'next' stream + local done = false + + -- return a closure which the caller can use to get the next value + return function() + + -- we bail if we already exhausted the stream + if done then return nil end + + -- get the first value + local a = next() + + -- apply the transformation + if a ~= nil then + return f(a) + end + + done = true; + + return nil + end +end + +-- +-- Combines two values from an istream into a single value. 
+-- @param next - a generator that produces the next value from a stream +-- @param f - the reduction operation +-- +function reduce( next, f ) + -- done indicates if we exhausted the 'next' stream + local done = false + + -- return a closure which the caller can use to get the next value + return function() + + + -- we bail if we already exhausted the stream + if done then return nil end + + -- get the first value + local a = next() + + if a ~= nil then + -- get each subsequent value and reduce them + for b in next do + a = f(a,b) + end + end + + -- we are done! + done = true + + return a + end +end + +-- +-- Aggregate values into a single value. +-- @param next - a generator that produces the next value from a stream +-- @param f - the aggregation operation +-- +function aggregate( next, init, f ) + -- done indicates if we exhausted the 'next' stream + local done = false + + -- return a closure which the caller can use to get the next value + return function() + + -- we bail if we already exhausted the stream + if done then return nil end + + -- get the initial value + local a = clone(init) + + -- get each subsequent value and aggregate them + for b in next do + a = f(a,b) + + -- check the size limit, if it is exceeded, + -- then return the value + if check_limit(a) then + return a + end + end + + -- we are done! + done = true + + return a + end +end + +-- +-- as_stream iterator +-- +function stream_iterator(s) + local done = false + return function() + if done then return nil end + local v = stream.read(s) + if v == nil then + done = true + end + return v; + end +end + + + +-- ###################################################################################### +-- +-- StreamOps +-- Builds a sequence of operations to be applied to a stream of values. 
+-- +-- ###################################################################################### + +StreamOps = {} +StreamOps_mt = { __index = StreamOps } + +-- Op only executes on server +local SCOPE_SERVER = 1 + +-- Op only executes on client +local SCOPE_CLIENT = 2 + +-- Op can execute on either client or server +local SCOPE_EITHER = 3 + +-- Op executes on both client and server +local SCOPE_BOTH = 4 + +-- +-- Creates a new StreamOps using an array of ops +-- +-- @param ops an array of operations +-- +function StreamOps_create() + local self = {} + setmetatable(self, StreamOps_mt) + self.ops = {} + return self +end + +function StreamOps_apply(stream, ops, i, n) + + -- if nil, then use default values + i = i or 1 + n = n or #ops + + -- if index in list > size of list, then return the stream + if i > n then return stream end + + -- get the current operation + local op = ops[i] + + -- apply the operation and get a stream or use provided stream + local s = op.func(stream, unpack(op.args)) or stream + + -- move to the next operation + return StreamOps_apply(s, ops, i + 1, n) +end + + +-- +-- This selects the operations appropriate for a given scope. +-- For the SERVER scope, it will select the first n ops until one of the ops +-- is a CLIENT scope op. +-- For the CLIENT scope, it will skip the first n ops that are SERVER scope +-- ops, then it will take the remaining ops, including SERVER scoped ops. 
+-- +function StreamOps_select(stream_ops, scope) + local server_ops = {} + local client_ops = {} + + local phase = SCOPE_SERVER + for i,op in ipairs(stream_ops) do + if phase == SCOPE_SERVER then + if op.scope == SCOPE_SERVER then + table.insert(server_ops, op) + elseif op.scope == SCOPE_EITHER then + table.insert(server_ops, op) + elseif op.scope == SCOPE_BOTH then + table.insert(server_ops, op) + table.insert(client_ops, op) + phase = SCOPE_CLIENT + end + elseif phase == SCOPE_CLIENT then + table.insert(client_ops, op) + end + end + + if scope == SCOPE_CLIENT then + return client_ops + else + return server_ops + end +end + + + +-- +-- OPS: [ OP, ... ] +-- OP: {scope=SCOPE, name=NAME, func=FUNC, args=ARGS} +-- SCOPE: ANY(0) | SERVER(1) | CLIENT(2) | +-- NAME: FUNCTION NAME +-- FUNC: FUNCTION POINTER +-- ARGS: ARRAY OF ARGUMENTS +-- + + +function StreamOps:aggregate(...) + table.insert(self.ops, { scope = SCOPE_SERVER, name = "aggregate", func = aggregate, args = {...}}) + return self +end + +function StreamOps:reduce(...) + table.insert(self.ops, { scope = SCOPE_BOTH, name = "reduce", func = reduce, args = {...}}) + return self +end + +function StreamOps:map(...) + table.insert(self.ops, { scope = SCOPE_EITHER, name = "map", func = transform, args = {...}}) + return self +end + +function StreamOps:filter(...) + table.insert(self.ops, { scope = SCOPE_EITHER, name = "filter", func = filter, args = {...}}) + return self +end + +-- stream : group(f) +-- +-- Group By will return a Map of keys to a list of values. The key is determined by applying the +-- function 'f' to each element in the stream. 
+-- +function StreamOps:groupby(f) + + local function _aggregate(m, v) + local k = f and f(v) or nil; + local l = m[k] or list() + list.append(l, v) + m[k] = l; + return m; + end + + local function _merge(l1, l2) + local l = list.clone(l1) + for v in list.iterator(l2) do + list.append(l, v) + end + return l + end + + function _reduce(m1, m2) + return map.merge(m1, m2, _merge) + end + + return self : aggregate(map(), _aggregate) : reduce(_reduce) +end +` diff --git a/vendor/github.com/aerospike/aerospike-client-go/key.go b/vendor/github.com/aerospike/aerospike-client-go/key.go new file mode 100644 index 00000000000..56d549cd663 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/key.go @@ -0,0 +1,159 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "fmt" + + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// Key is the unique record identifier. Records can be identified using a specified namespace, +// an optional set name, and a user defined key which must be unique within a set. +// Records can also be identified by namespace/digest which is the combination used +// on the server. +type Key struct { + // namespace. Equivalent to database name. + namespace string + + // Optional set name. Equivalent to database table. 
+ setName string + + // Unique server hash value generated from set name and user key. + digest [20]byte + + // Original user key. This key is immediately converted to a hash digest. + // This key is not used or returned by the server by default. If the user key needs + // to persist on the server, use one of the following methods: + // + // Set "WritePolicy.sendKey" to true. In this case, the key will be sent to the server for storage on writes + // and retrieved on multi-record scans and queries. + // Explicitly store and retrieve the key in a bin. + userKey Value + + keyWriter keyWriter +} + +// Namespace returns key's namespace. +func (ky *Key) Namespace() string { + return ky.namespace +} + +// SetName returns key's set name. +func (ky *Key) SetName() string { + return ky.setName +} + +// Value returns key's value. +func (ky *Key) Value() Value { + return ky.userKey +} + +// SetValue sets the Key's value and recompute's its digest without allocating new memory. +// This allows the keys to be reusable. +func (ky *Key) SetValue(val Value) error { + ky.userKey = val + return ky.computeDigest() +} + +// Digest returns key digest. +func (ky *Key) Digest() []byte { + return ky.digest[:] +} + +// Equals uses key digests to compare key equality. +func (ky *Key) Equals(other *Key) bool { + return bytes.Equal(ky.digest[:], other.digest[:]) +} + +// String implements Stringer interface and returns string representation of key. +func (ky *Key) String() string { + if ky == nil { + return "" + } + + if ky.userKey != nil { + return fmt.Sprintf("%s:%s:%s:%v", ky.namespace, ky.setName, ky.userKey.String(), Buffer.BytesToHexString(ky.digest[:])) + } + return fmt.Sprintf("%s:%s::%v", ky.namespace, ky.setName, Buffer.BytesToHexString(ky.digest[:])) +} + +// NewKey initializes a key from namespace, optional set name and user key. +// The set name and user defined key are converted to a digest before sending to the server. +// The server handles record identifiers by digest only. 
+func NewKey(namespace string, setName string, key interface{}) (*Key, error) {
+	newKey := &Key{
+		namespace: namespace,
+		setName:   setName,
+		userKey:   NewValue(key),
+	}
+
+	if err := newKey.computeDigest(); err != nil {
+		return nil, err
+	}
+
+	return newKey, nil
+}
+
+// NewKeyWithDigest initializes a key from namespace, optional set name and user key.
+// The server handles record identifiers by digest only.
+func NewKeyWithDigest(namespace string, setName string, key interface{}, digest []byte) (*Key, error) {
+	newKey := &Key{
+		namespace: namespace,
+		setName:   setName,
+		userKey:   NewValue(key),
+	}
+
+	if err := newKey.SetDigest(digest); err != nil {
+		return nil, err
+	}
+	return newKey, nil
+}
+
+// SetDigest sets a custom hash
+func (ky *Key) SetDigest(digest []byte) error {
+	if len(digest) != 20 {
+		return NewAerospikeError(PARAMETER_ERROR, "Invalid digest: Digest is required to be exactly 20 bytes.")
+	}
+	copy(ky.digest[:], digest)
+	return nil
+}
+
+// Generate unique server hash value from set name, key type and user defined key.
+// The hash function is RIPEMD-160 (a 160 bit hash).
+func (ky *Key) computeDigest() error {
+	// With custom changes to the ripemd160 package,
+	// now the following line does not allocate on the heap anymore.
+	ky.keyWriter.hash.Reset()
+
+	if _, err := ky.keyWriter.Write([]byte(ky.setName)); err != nil {
+		return err
+	}
+
+	if _, err := ky.keyWriter.Write([]byte{byte(ky.userKey.GetType())}); err != nil {
+		return err
+	}
+
+	if err := ky.keyWriter.writeKey(ky.userKey); err != nil {
+		return err
+	}
+
+	// With custom changes to the ripemd160 package,
+	// the following line does not allocate on the heap anymore.
+ ky.keyWriter.hash.Sum(ky.digest[:]) + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/key_helper.go b/vendor/github.com/aerospike/aerospike-client-go/key_helper.go new file mode 100644 index 00000000000..e30d14dca3b --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/key_helper.go @@ -0,0 +1,149 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "encoding/binary" + "math" + + "github.com/aerospike/aerospike-client-go/pkg/ripemd160" + . "github.com/aerospike/aerospike-client-go/types" +) + +type keyWriter struct { + buffer [8]byte + offset int + hash ripemd160.Digest +} + +// Int64ToBytes converts an int64 into slice of Bytes. +func (vb *keyWriter) WriteInt64(num int64) (int, error) { + return vb.WriteUint64(uint64(num)) +} + +// Uint64ToBytes converts an uint64 into slice of Bytes. 
+func (vb *keyWriter) WriteUint64(num uint64) (int, error) { + binary.BigEndian.PutUint64(vb.buffer[:8], num) + vb.hash.Write(vb.buffer[:8]) + return 8, nil +} + +// Int32ToBytes converts an int32 to a byte slice of size 4 +func (vb *keyWriter) WriteInt32(num int32) (int, error) { + return vb.WriteUint32(uint32(num)) +} + +// Uint32ToBytes converts an uint32 to a byte slice of size 4 +func (vb *keyWriter) WriteUint32(num uint32) (int, error) { + binary.BigEndian.PutUint32(vb.buffer[:4], num) + vb.hash.Write(vb.buffer[:4]) + return 4, nil +} + +// Int16ToBytes converts an int16 to slice of bytes +func (vb *keyWriter) WriteInt16(num int16) (int, error) { + return vb.WriteUint16(uint16(num)) +} + +// UInt16ToBytes converts an iuint16 to slice of bytes +func (vb *keyWriter) WriteUint16(num uint16) (int, error) { + binary.BigEndian.PutUint16(vb.buffer[:2], num) + vb.hash.Write(vb.buffer[:2]) + return 2, nil +} + +func (vb *keyWriter) WriteFloat32(float float32) (int, error) { + bits := math.Float32bits(float) + binary.BigEndian.PutUint32(vb.buffer[:4], bits) + vb.hash.Write(vb.buffer[:4]) + return 4, nil +} + +func (vb *keyWriter) WriteFloat64(float float64) (int, error) { + bits := math.Float64bits(float) + binary.BigEndian.PutUint64(vb.buffer[:8], bits) + vb.hash.Write(vb.buffer[:8]) + return 8, nil +} + +func (vb *keyWriter) WriteByte(b byte) error { + _, err := vb.hash.Write([]byte{b}) + return err +} + +func (vb *keyWriter) WriteString(s string) (int, error) { + // To avoid allocating memory, write the strings in small chunks + l := len(s) + const size = 128 + b := [size]byte{} + cnt := 0 + for i := 0; i < l; i++ { + b[cnt] = s[i] + cnt++ + + if cnt == size { + vb.Write(b[:]) + cnt = 0 + } + } + + if cnt > 0 { + vb.Write(b[:cnt]) + } + + return len(s), nil +} + +func (vb *keyWriter) Write(b []byte) (int, error) { + vb.hash.Write(b) + return len(b), nil +} + +func (vb *keyWriter) writeKey(val Value) error { + switch v := val.(type) { + case IntegerValue: + 
vb.WriteInt64(int64(v)) + return nil + case LongValue: + vb.WriteInt64(int64(v)) + return nil + case FloatValue: + vb.WriteFloat64(float64(v)) + return nil + case StringValue: + vb.WriteString(string(v)) + return nil + case ListValue: + v.pack(vb) + return nil + case *ListValue: + v.pack(vb) + return nil + case *ListerValue: + v.pack(vb) + return nil + case ValueArray: + v.pack(vb) + return nil + case *ValueArray: + v.pack(vb) + return nil + case BytesValue: + vb.Write(v) + return nil + } + + return NewAerospikeError(PARAMETER_ERROR, "Key Generation Error. Value not supported: "+val.String()) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/language.go b/vendor/github.com/aerospike/aerospike-client-go/language.go new file mode 100644 index 00000000000..72855607c65 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/language.go @@ -0,0 +1,24 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// Language specifies User defined function languages. +type Language string + +const ( + + // LUA embedded programming language. 
+ LUA Language = "LUA" +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/large_list.go b/vendor/github.com/aerospike/aerospike-client-go/large_list.go new file mode 100644 index 00000000000..c3e60ddd8bc --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/large_list.go @@ -0,0 +1,262 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// LargeList encapsulates a list within a single bin. +type LargeList struct { + *baseLargeObject +} + +// NewLargeList initializes a large list operator. +func NewLargeList(client *Client, policy *WritePolicy, key *Key, binName string, userModule string) *LargeList { + return &LargeList{ + baseLargeObject: newLargeObject(client, policy, key, binName, userModule, "llist"), + } +} + +// Add adds values to the list. +// If the list does not exist, create it +func (ll *LargeList) Add(values ...interface{}) (err error) { + _, err = ll.client.Execute(ll.policy, ll.key, ll.packageName, "add", ll.binName, ToValueArray(values)) + return err +} + +// Update updates/adds each value in values list depending if key exists or not. +func (ll *LargeList) Update(values ...interface{}) (err error) { + _, err = ll.client.Execute(ll.policy, ll.key, ll.packageName, "update", ll.binName, ToValueArray(values)) + return err +} + +// Remove deletes value from list. 
+func (ll *LargeList) Remove(values ...interface{}) (err error) { + _, err = ll.client.Execute(ll.policy, ll.key, ll.packageName, "remove", ll.binName, ToValueArray(values)) + return err +} + +// Find selects values from list. +func (ll *LargeList) Find(value interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find", ll.binName, NewValue(value)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// Do key/values exist? Return list of results in one batch call. +func (ll *LargeList) Exist(values ...interface{}) ([]bool, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "exists", ll.binName, NewValue(values)) + if err != nil { + return nil, err + } + + var ret []bool + if res == nil { + return make([]bool, len(values)), nil + } else { + ret = make([]bool, len(values)) + resTyped := res.([]interface{}) + for i := range resTyped { + ret[i] = resTyped[i].(int) != 0 + } + } + + return ret, err +} + +// FindThenFilter selects values from list and applies specified Lua filter. +func (ll *LargeList) FindThenFilter(value interface{}, filterModule, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find", ll.binName, NewValue(value), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FindFirst selects values from the beginning of list up to a maximum count. 
+func (ll *LargeList) FindFirst(count int) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_first", ll.binName, NewValue(count)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FFilterThenindFirst selects values from the beginning of list up to a maximum count after applying lua filter. +func (ll *LargeList) FFilterThenindFirst(count int, filterModule, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_first", ll.binName, NewValue(count), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FindLast selects values from the end of list up to a maximum count. +func (ll *LargeList) FindLast(count int) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_last", ll.binName, NewValue(count)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FilterThenFindLast selects values from the end of list up to a maximum count after applying lua filter. +func (ll *LargeList) FilterThenFindLast(count int, filterModule, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_last", ll.binName, NewValue(count), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FindFrom selects values from the begin key up to a maximum count. 
+func (ll *LargeList) FindFrom(begin interface{}, count int) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_from", ll.binName, NewValue(begin), NewValue(count)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// FilterThenFindFrom selects values from the begin key up to a maximum count after applying lua filter. +func (ll *LargeList) FilterThenFindFrom(begin interface{}, count int, filterModule, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_from", ll.binName, NewValue(begin), NewValue(count), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// Range selects a range of values from the large list. +func (ll *LargeList) Range(begin, end interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_range", ll.binName, NewValue(begin), NewValue(end)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// RangeN selects a range of values up to a maximum count from the large list. +func (ll *LargeList) RangeN(begin, end interface{}, count int) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "find_range", ll.binName, NewValue(begin), NewValue(end), NewValue(count)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// RangeThenFilter selects a range of values from the large list then apply filter. 
+func (ll *LargeList) RangeThenFilter(begin, end interface{}, filterModule string, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "range", ll.binName, NewValue(begin), NewValue(end), NewValue(0), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// RangeNThenFilter selects a range of values up to a maximum count from the large list then apply filter. +func (ll *LargeList) RangeNThenFilter(begin, end interface{}, count int, filterModule string, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "range", ll.binName, NewValue(begin), NewValue(end), NewValue(count), NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// Scan returns all objects in the list. +func (ll *LargeList) Scan() ([]interface{}, error) { + return ll.scan(ll) +} + +// Filter selects values from list and apply specified Lua filter. +func (ll *LargeList) Filter(filterModule, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "scan", ll.binName, NewValue(filterModule), NewValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return []interface{}{}, nil + } + return res.([]interface{}), err +} + +// Destroy deletes the bin containing the list. +func (ll *LargeList) Destroy() error { + return ll.destroy(ll) +} + +// Size returns size of list. +func (ll *LargeList) Size() (int, error) { + return ll.size(ll) +} + +// SetPageSize sets the LDT page size. 
+func (ll *LargeList) SetPageSize(pageSize int) error { + _, err := ll.client.Execute(ll.policy, ll.key, ll.packageName, "setPageSize", ll.binName, NewValue(pageSize)) + return err +} + +// GetConfig returns map of list configuration parameters. +func (ll *LargeList) GetConfig() (map[interface{}]interface{}, error) { + return ll.getConfig(ll) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/large_map.go b/vendor/github.com/aerospike/aerospike-client-go/large_map.go new file mode 100644 index 00000000000..d386e790c78 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/large_map.go @@ -0,0 +1,124 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///////////////////////////////////////////////////////////// +// +// NOTICE: +// THIS FEATURE HAS BEEN DEPRECATED ON SERVER. +// THE API WILL BE REMOVED FROM THE CLIENT IN THE FUTURE. +// +///////////////////////////////////////////////////////////// + +package aerospike + +// LargeMap encapsulates a map within a single bin. +type LargeMap struct { + *baseLargeObject +} + +// NewLargeMap initializes a large map operator. +func NewLargeMap(client *Client, policy *WritePolicy, key *Key, binName string, userModule string) *LargeMap { + return &LargeMap{ + baseLargeObject: newLargeObject(client, policy, key, binName, userModule, "lmap"), + } +} + +// Put adds an entry to the map. 
+// If the map does not exist, create it using specified userModule configuration. +func (lm *LargeMap) Put(name interface{}, value interface{}) error { + _, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "put", lm.binName, NewValue(name), NewValue(value), lm.userModule) + return err +} + +// PutMap adds map values to the map. +// If the map does not exist, create it using specified userModule configuration. +func (lm *LargeMap) PutMap(theMap map[interface{}]interface{}) error { + _, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "put_all", lm.binName, NewMapValue(theMap), lm.userModule) + return err +} + +// Exists checks existence of key in the map. +func (lm *LargeMap) Exists(keyValue interface{}) (bool, error) { + res, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "exists", lm.binName, NewValue(keyValue)) + + if err != nil { + return false, err + } + + if res == nil { + return false, nil + } + return (res.(int) != 0), err +} + +// Get returns value from map corresponding with the provided key. +func (lm *LargeMap) Get(name interface{}) (map[interface{}]interface{}, error) { + res, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "get", lm.binName, NewValue(name)) + + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.(map[interface{}]interface{}), err +} + +// Remove deletes a value from map given a key. +func (lm *LargeMap) Remove(name interface{}) error { + _, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "remove", lm.binName, NewValue(name)) + return err +} + +// Scan returns all objects in the map. +func (lm *LargeMap) Scan() (map[interface{}]interface{}, error) { + res, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "scan", lm.binName) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.(map[interface{}]interface{}), err +} + +// Filter selects items from the map. 
+func (lm *LargeMap) Filter(filterName string, filterArgs ...interface{}) (map[interface{}]interface{}, error) { + res, err := lm.client.Execute(lm.policy, lm.key, lm.packageName, "filter", lm.binName, lm.userModule, NewStringValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.(map[interface{}]interface{}), err +} + +// Destroy deletes the bin containing the map. +func (lm *LargeMap) Destroy() error { + return lm.destroy(lm) +} + +// Size returns size of the map. +func (lm *LargeMap) Size() (int, error) { + return lm.size(lm) +} + +// GetConfig returns map of map configuration parameters. +func (lm *LargeMap) GetConfig() (map[interface{}]interface{}, error) { + return lm.getConfig(lm) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/large_object.go b/vendor/github.com/aerospike/aerospike-client-go/large_object.go new file mode 100644 index 00000000000..d6558b43f46 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/large_object.go @@ -0,0 +1,105 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// LargeObject interface defines methods to work with LDTs. +type LargeObject interface { + // Destroy the bin containing LDT. + Destroy() error + // Size returns the size of the LDT. + Size() (int, error) + // GetConfig returns a map containing LDT config values. 
+ GetConfig() (map[interface{}]interface{}, error) +} + +// Create and manage a large object within a single bin. A large object is last in/first out (LIFO). +type baseLargeObject struct { + client *Client + policy *WritePolicy + key *Key + binName Value + userModule Value + packageName string +} + +// Initialize large large object operator. +// +// client client +// policy generic configuration parameters, pass in nil for defaults +// key unique record identifier +// binName bin name +// userModule Lua function name that initializes list configuration parameters, pass nil for default large object +func newLargeObject(client *Client, policy *WritePolicy, key *Key, binName, userModule string, packageName string) *baseLargeObject { + r := &baseLargeObject{ + client: client, + policy: policy, + key: key, + binName: NewStringValue(binName), + packageName: packageName, + } + + if userModule == "" { + r.userModule = NewNullValue() + } else { + r.userModule = NewStringValue(userModule) + } + + return r +} + +// Delete bin containing the object. +func (lo *baseLargeObject) destroy(ifc LargeObject) error { + _, err := lo.client.Execute(lo.policy, lo.key, lo.packageName, "destroy", lo.binName) + return err +} + +// Return size of object. +func (lo *baseLargeObject) size(ifc LargeObject) (int, error) { + ret, err := lo.client.Execute(lo.policy, lo.key, lo.packageName, "size", lo.binName) + if err != nil { + return -1, err + } + + if ret != nil { + return ret.(int), nil + } + return 0, nil +} + +// Return map of object configuration parameters. +func (lo *baseLargeObject) getConfig(ifc LargeObject) (map[interface{}]interface{}, error) { + res, err := lo.client.Execute(lo.policy, lo.key, lo.packageName, "get_config", lo.binName) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.(map[interface{}]interface{}), err +} + +// Return list of all objects on the large object. 
+func (lo *baseLargeObject) scan(ifc LargeObject) ([]interface{}, error) { + ret, err := lo.client.Execute(lo.policy, lo.key, lo.packageName, "scan", lo.binName) + if err != nil { + return nil, err + } + + if ret == nil { + return []interface{}{}, nil + } + return ret.([]interface{}), nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/large_set.go b/vendor/github.com/aerospike/aerospike-client-go/large_set.go new file mode 100644 index 00000000000..67ed7a2bfe8 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/large_set.go @@ -0,0 +1,97 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// LargeSet encapsulates a set within a single bin. +type LargeSet struct { + *baseLargeObject +} + +// NewLargeSet initializes a large set operator. +func NewLargeSet(client *Client, policy *WritePolicy, key *Key, binName string, userModule string) *LargeSet { + return &LargeSet{ + baseLargeObject: newLargeObject(client, policy, key, binName, userModule, "lset"), + } +} + +// Add adds values to the set. +// If the set does not exist, create it using specified userModule configuration. 
+func (ls *LargeSet) Add(values ...interface{}) error { + var err error + if len(values) == 1 { + _, err = ls.client.Execute(ls.policy, ls.key, ls.packageName, "add", ls.binName, NewValue(values[0]), ls.userModule) + } else { + _, err = ls.client.Execute(ls.policy, ls.key, ls.packageName, "add_all", ls.binName, ToValueArray(values), ls.userModule) + } + + return err +} + +// Remove delete value from set. +func (ls *LargeSet) Remove(value interface{}) error { + _, err := ls.client.Execute(ls.policy, ls.key, ls.packageName, "remove", ls.binName, NewValue(value)) + return err +} + +// Get selects a value from set. +func (ls *LargeSet) Get(value interface{}) (interface{}, error) { + return ls.client.Execute(ls.policy, ls.key, ls.packageName, "get", ls.binName, NewValue(value)) +} + +// Exists checks existence of value in the set. +func (ls *LargeSet) Exists(value interface{}) (bool, error) { + ret, err := ls.client.Execute(ls.policy, ls.key, ls.packageName, "exists", ls.binName, NewValue(value)) + if err != nil { + return false, err + } + return (ret == 1), nil +} + +// Scan returns all objects in the set. +func (ls *LargeSet) Scan() ([]interface{}, error) { + return ls.scan(ls) +} + +// Filter select values from set and applies specified Lua filter. +func (ls *LargeSet) Filter(filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := ls.client.Execute(ls.policy, ls.key, ls.packageName, "filter", ls.binName, ls.userModule, NewStringValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.([]interface{}), err +} + +// Destroy deletes the bin containing the set. +func (ls *LargeSet) Destroy() error { + return ls.destroy(ls) +} + +// Size returns size of the set. +func (ls *LargeSet) Size() (int, error) { + return ls.size(ls) +} + +// GetConfig returns map of set configuration parameters. 
+func (ls *LargeSet) GetConfig() (map[interface{}]interface{}, error) { + return ls.getConfig(ls) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/large_stack.go b/vendor/github.com/aerospike/aerospike-client-go/large_stack.go new file mode 100644 index 00000000000..33d0e7a1a5a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/large_stack.go @@ -0,0 +1,107 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///////////////////////////////////////////////////////////// +// +// NOTICE: +// THIS FEATURE HAS BEEN DEPRECATED ON SERVER. +// THE API WILL BE REMOVED FROM THE CLIENT IN THE FUTURE. +// +///////////////////////////////////////////////////////////// + +package aerospike + +// LargeStack encapsulates a stack within a single bin. +// A stack is last in/first out (LIFO) data structure. +type LargeStack struct { + *baseLargeObject +} + +// NewLargeStack initializes a large stack operator. +func NewLargeStack(client *Client, policy *WritePolicy, key *Key, binName string, userModule string) *LargeStack { + return &LargeStack{ + baseLargeObject: newLargeObject(client, policy, key, binName, userModule, "lstack"), + } +} + +// Push pushes values onto stack. +// If the stack does not exist, create it using specified userModule configuration. 
+func (lstk *LargeStack) Push(values ...interface{}) error { + var err error + if len(values) == 1 { + _, err = lstk.client.Execute(lstk.policy, lstk.key, lstk.packageName, "push", lstk.binName, NewValue(values[0]), lstk.userModule) + } else { + _, err = lstk.client.Execute(lstk.policy, lstk.key, lstk.packageName, "push_all", lstk.binName, ToValueArray(values), lstk.userModule) + } + return err +} + +// Peek select items from top of stack, without removing them +func (lstk *LargeStack) Peek(peekCount int) ([]interface{}, error) { + res, err := lstk.client.Execute(lstk.policy, lstk.key, lstk.packageName, "peek", lstk.binName, NewIntegerValue(peekCount)) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.([]interface{}), nil +} + +// Pop selects items from top of stack and then removes them. +func (lstk *LargeStack) Pop(count int) ([]interface{}, error) { + res, err := lstk.client.Execute(lstk.policy, lstk.key, lstk.packageName, "pop", lstk.binName, NewIntegerValue(count)) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.([]interface{}), nil +} + +// Scan returns all objects in the stack. +func (lstk *LargeStack) Scan() ([]interface{}, error) { + return lstk.scan(lstk) +} + +// Filter selects items from top of stack. +func (lstk *LargeStack) Filter(peekCount int, filterName string, filterArgs ...interface{}) ([]interface{}, error) { + res, err := lstk.client.Execute(lstk.policy, lstk.key, lstk.packageName, "filter", lstk.binName, NewIntegerValue(peekCount), lstk.userModule, NewStringValue(filterName), ToValueArray(filterArgs)) + if err != nil { + return nil, err + } + + if res == nil { + return nil, nil + } + return res.([]interface{}), nil +} + +// Destroy deletes the bin containing the stack. +func (lstk *LargeStack) Destroy() error { + return lstk.destroy(lstk) +} + +// Size returns size of the stack. 
+func (lstk *LargeStack) Size() (int, error) { + return lstk.size(lstk) +} + +// GetConfig returns map of stack configuration parameters. +func (lstk *LargeStack) GetConfig() (map[interface{}]interface{}, error) { + return lstk.getConfig(lstk) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/logger/logger.go b/vendor/github.com/aerospike/aerospike-client-go/logger/logger.go new file mode 100644 index 00000000000..8b7d42ef39a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/logger/logger.go @@ -0,0 +1,111 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logger + +import ( + "log" + "os" + "sync" +) + +// LogPriority specifies the logging level for the client +type LogPriority int + +const ( + DEBUG LogPriority = iota - 1 + INFO + WARNING + ERR + OFF LogPriority = 999 +) + +type genericLogger interface { + Printf(format string, v ...interface{}) +} + +type logger struct { + Logger genericLogger + + level LogPriority + mutex sync.RWMutex +} + +// Logger is the default logger instance +var Logger = newLogger() + +func newLogger() *logger { + return &logger{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + level: OFF, + } +} + +// SetLogger sets the *log.Logger object where log messages should be sent to. +func (lgr *logger) SetLogger(l genericLogger) { + lgr.mutex.Lock() + defer lgr.mutex.Unlock() + + lgr.Logger = l +} + +// SetLevel sets logging level. Default is ERR. 
+func (lgr *logger) SetLevel(level LogPriority) { + lgr.mutex.Lock() + defer lgr.mutex.Unlock() + + lgr.level = level +} + +// Error logs a message if log level allows to do so. +func (lgr *logger) LogAtLevel(level LogPriority, format string, v ...interface{}) { + switch level { + case DEBUG: + lgr.Debug(format, v) + case INFO: + lgr.Info(format, v) + case WARNING: + lgr.Warn(format, v) + case ERR: + lgr.Error(format, v) + } +} + +// Debug logs a message if log level allows to do so. +func (lgr *logger) Debug(format string, v ...interface{}) { + if lgr.level <= DEBUG { + lgr.Logger.Printf(format, v...) + } +} + +// Info logs a message if log level allows to do so. +func (lgr *logger) Info(format string, v ...interface{}) { + if lgr.level <= INFO { + lgr.Logger.Printf(format, v...) + } +} + +// Warn logs a message if log level allows to do so. +func (lgr *logger) Warn(format string, v ...interface{}) { + if lgr.level <= WARNING { + lgr.Logger.Printf(format, v...) + } +} + +// Error logs a message if log level allows to do so. +func (lgr *logger) Error(format string, v ...interface{}) { + if lgr.level <= ERR { + lgr.Logger.Printf(format, v...) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/marshal.go b/vendor/github.com/aerospike/aerospike-client-go/marshal.go new file mode 100644 index 00000000000..9cce96f5e2c --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/marshal.go @@ -0,0 +1,341 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync" + "time" + + . "github.com/aerospike/aerospike-client-go/types" +) + +var aerospikeTag = "as" + +const ( + aerospikeMetaTag = "asm" + keyTag = "key" +) + +// SetAerospikeTag sets the bin tag to the specified tag. +// This will be useful for when a user wants to use the same tag name for two different concerns. +// For example, one will be able to use the same tag name for both json and aerospike bin name. +func SetAerospikeTag(tag string) { + aerospikeTag = tag +} + +func valueToInterface(f reflect.Value, clusterSupportsFloat bool) interface{} { + // get to the core value + for f.Kind() == reflect.Ptr { + if f.IsNil() { + return nil + } + f = reflect.Indirect(f) + } + + switch f.Kind() { + case reflect.Uint64: + return int64(f.Uint()) + case reflect.Float64, reflect.Float32: + // support floats through integer encoding if + // server doesn't support floats + if clusterSupportsFloat { + return f.Float() + } + return int(math.Float64bits(f.Float())) + + case reflect.Struct: + if f.Type().PkgPath() == "time" && f.Type().Name() == "Time" { + return f.Interface().(time.Time).UTC().UnixNano() + } + return structToMap(f, clusterSupportsFloat) + case reflect.Bool: + if f.Bool() { + return int64(1) + } + return int64(0) + case reflect.Map: + if f.IsNil() { + return nil + } + + newMap := make(map[interface{}]interface{}, f.Len()) + for _, mk := range f.MapKeys() { + newMap[valueToInterface(mk, clusterSupportsFloat)] = valueToInterface(f.MapIndex(mk), clusterSupportsFloat) + } + + return newMap + case reflect.Slice, reflect.Array: + if f.Kind() == reflect.Slice && f.IsNil() { + return nil + } + if f.Kind() == reflect.Slice && reflect.TypeOf(f.Interface()).Elem().Kind() == reflect.Uint8 { + // handle blobs + return f.Interface().([]byte) + } + // convert to primitives recursively + newSlice 
:= make([]interface{}, f.Len(), f.Cap()) + for i := 0; i < len(newSlice); i++ { + newSlice[i] = valueToInterface(f.Index(i), clusterSupportsFloat) + } + return newSlice + case reflect.Interface: + if f.IsNil() { + return nil + } + return f.Interface() + default: + return f.Interface() + } +} + +func fieldIsMetadata(f reflect.StructField) bool { + meta := f.Tag.Get(aerospikeMetaTag) + return strings.Trim(meta, " ") != "" +} + +func fieldAlias(f reflect.StructField) string { + alias := f.Tag.Get(aerospikeTag) + if alias != "" { + alias = strings.Trim(alias, " ") + + // if tag is -, the field should not be persisted + if alias == "-" { + return "" + } + return alias + } + return f.Name +} + +func structToMap(s reflect.Value, clusterSupportsFloat bool) map[string]interface{} { + if !s.IsValid() { + return nil + } + + typeOfT := s.Type() + numFields := s.NumField() + + var binMap map[string]interface{} + for i := 0; i < numFields; i++ { + // skip unexported fields + if typeOfT.Field(i).PkgPath != "" { + continue + } + + if fieldIsMetadata(typeOfT.Field(i)) { + continue + } + + // skip transiet fields tagged `-` + alias := fieldAlias(typeOfT.Field(i)) + if alias == "" { + continue + } + + binValue := valueToInterface(s.Field(i), clusterSupportsFloat) + + if binMap == nil { + binMap = make(map[string]interface{}, numFields) + } + + binMap[alias] = binValue + } + + return binMap +} + +func marshal(v interface{}, clusterSupportsFloat bool) []*Bin { + s := indirect(reflect.ValueOf(v)) + numFields := s.NumField() + bins := binPool.Get(numFields).([]*Bin) + + binCount := 0 + n := structToMap(s, clusterSupportsFloat) + for k, v := range n { + bins[binCount].Name = k + + bins[binCount].Value = NewValue(v) + binCount++ + } + + return bins[:binCount] +} + +type syncMap struct { + objectMappings map[reflect.Type]map[string]string + objectFields map[reflect.Type][]string + objectTTLs map[reflect.Type][]string + objectGen map[reflect.Type][]string + mutex sync.RWMutex +} + +func (sm 
*syncMap) setMapping(objType reflect.Type, mapping map[string]string, fields, ttl, gen []string) { + sm.mutex.Lock() + sm.objectMappings[objType] = mapping + sm.objectFields[objType] = fields + sm.objectTTLs[objType] = ttl + sm.objectGen[objType] = gen + sm.mutex.Unlock() +} + +func indirect(obj reflect.Value) reflect.Value { + for obj.Kind() == reflect.Ptr { + if obj.IsNil() { + return obj + } + obj = obj.Elem() + } + return obj +} + +func indirectT(objType reflect.Type) reflect.Type { + for objType.Kind() == reflect.Ptr { + objType = objType.Elem() + } + return objType +} + +func (sm *syncMap) mappingExists(objType reflect.Type) (map[string]string, bool) { + sm.mutex.RLock() + mapping, exists := sm.objectMappings[objType] + sm.mutex.RUnlock() + return mapping, exists +} + +func (sm *syncMap) getMapping(objType reflect.Type) map[string]string { + objType = indirectT(objType) + mapping, exists := sm.mappingExists(objType) + if !exists { + cacheObjectTags(objType) + mapping, _ = sm.mappingExists(objType) + } + + return mapping +} + +func (sm *syncMap) getMetaMappings(objType reflect.Type) (ttl, gen []string) { + objType = indirectT(objType) + if _, exists := sm.mappingExists(objType); !exists { + cacheObjectTags(objType) + } + + sm.mutex.RLock() + ttl = sm.objectTTLs[objType] + gen = sm.objectGen[objType] + sm.mutex.RUnlock() + return ttl, gen +} + +func (sm *syncMap) fieldsExists(objType reflect.Type) ([]string, bool) { + sm.mutex.RLock() + mapping, exists := sm.objectFields[objType] + sm.mutex.RUnlock() + return mapping, exists +} + +func (sm *syncMap) getFields(objType reflect.Type) []string { + objType = indirectT(objType) + fields, exists := sm.fieldsExists(objType) + if !exists { + cacheObjectTags(objType) + fields, _ = sm.fieldsExists(objType) + } + + return fields +} + +var objectMappings = &syncMap{ + objectMappings: map[reflect.Type]map[string]string{}, + objectFields: map[reflect.Type][]string{}, + objectTTLs: map[reflect.Type][]string{}, + objectGen: 
map[reflect.Type][]string{}, +} + +func cacheObjectTags(objType reflect.Type) { + mapping := map[string]string{} + fields := []string{} + ttl := []string{} + gen := []string{} + + numFields := objType.NumField() + for i := 0; i < numFields; i++ { + f := objType.Field(i) + // skip unexported fields + if f.PkgPath != "" { + continue + } + + tag := strings.Trim(f.Tag.Get(aerospikeTag), " ") + tagM := strings.Trim(f.Tag.Get(aerospikeMetaTag), " ") + + if tag != "" && tagM != "" { + panic(fmt.Sprintf("Cannot accept both data and metadata tags on the same attribute on struct: %s.%s", objType.Name(), f.Name)) + } + + if tag != "-" && tagM == "" { + if tag != "" { + mapping[tag] = f.Name + fields = append(fields, tag) + } else { + fields = append(fields, f.Name) + } + } + + if tagM == "ttl" { + ttl = append(ttl, f.Name) + } else if tagM == "gen" { + gen = append(gen, f.Name) + } else if tagM != "" { + panic(fmt.Sprintf("Invalid metadata tag `%s` on struct attribute: %s.%s", tagM, objType.Name(), f.Name)) + } + } + + objectMappings.setMapping(objType, mapping, fields, ttl, gen) +} + +func binMapToBins(bins []*Bin, binMap BinMap) []*Bin { + i := 0 + for k, v := range binMap { + bins[i].Name = k + bins[i].Value = NewValue(v) + i++ + } + + return bins +} + +// pool Bins so that we won't have to allocate them every time +var binPool = NewPool(512) + +func init() { + binPool.New = func(params ...interface{}) interface{} { + size := params[0].(int) + bins := make([]*Bin, size, size) + for i := range bins { + bins[i] = &Bin{} + } + return bins + } + + binPool.IsUsable = func(obj interface{}, params ...interface{}) bool { + return len(obj.([]*Bin)) >= params[0].(int) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/multi_policy.go b/vendor/github.com/aerospike/aerospike-client-go/multi_policy.go new file mode 100644 index 00000000000..5d67044b940 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/multi_policy.go @@ -0,0 +1,47 @@ +// Copyright 
2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// MultiPolicy contains parameters for policy attributes used in +// query and scan operations. +type MultiPolicy struct { + *BasePolicy + + // Maximum number of concurrent requests to server nodes at any poin int time. + // If there are 16 nodes in the cluster and maxConcurrentNodes is 8, then queries + // will be made to 8 nodes in parallel. When a query completes, a new query will + // be issued until all 16 nodes have been queried. + // Default (0) is to issue requests to all server nodes in parallel. + MaxConcurrentNodes int + + // Number of records to place in queue before blocking. + // Records received from multiple server nodes will be placed in a queue. + // A separate goroutine consumes these records in parallel. + // If the queue is full, the producer goroutines will block until records are consumed. + RecordQueueSize int //= 5000 + + // Blocks until on-going migrations are over + WaitUntilMigrationsAreOver bool //=false +} + +// NewMultiPolicy initializes a MultiPolicy instance with default values. 
+func NewMultiPolicy() *MultiPolicy { + return &MultiPolicy{ + BasePolicy: NewPolicy(), + MaxConcurrentNodes: 0, + RecordQueueSize: 50, + WaitUntilMigrationsAreOver: false, + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/node.go b/vendor/github.com/aerospike/aerospike-client-go/node.go new file mode 100644 index 00000000000..14628c86cc0 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/node.go @@ -0,0 +1,602 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" + . "github.com/aerospike/aerospike-client-go/types/atomic" +) + +const ( + _PARTITIONS = 4096 +) + +// Node represents an Aerospike Database Server Node +type Node struct { + cluster *Cluster + name string + host *Host + aliases atomic.Value //[]*Host + + // tendConn reserves a connection for tend so that it won't have to + // wait in queue for connections, since that will cause starvation + // and the node being dropped under load. 
+ tendConn *Connection + tendConnLock sync.Mutex // All uses of tend connection should be synchronized + + peersGeneration AtomicInt + peersCount AtomicInt + + connections connectionQueue //AtomicQueue //ArrayBlockingQueue<*Connection> + connectionCount AtomicInt + health AtomicInt //AtomicInteger + + partitionMap partitionMap + partitionGeneration AtomicInt + referenceCount AtomicInt + failures AtomicInt + partitionChanged AtomicBool + + active AtomicBool + + supportsFloat, supportsBatchIndex, supportsReplicasAll, supportsGeo, supportsPeers AtomicBool +} + +// NewNode initializes a server node with connection parameters. +func newNode(cluster *Cluster, nv *nodeValidator) *Node { + newNode := &Node{ + cluster: cluster, + name: nv.name, + // address: nv.primaryAddress, + host: nv.primaryHost, + + // Assign host to first IP alias because the server identifies nodes + // by IP address (not hostname). + connections: *newConnectionQueue(cluster.clientPolicy.ConnectionQueueSize), //*NewAtomicQueue(cluster.clientPolicy.ConnectionQueueSize), + connectionCount: *NewAtomicInt(0), + peersGeneration: *NewAtomicInt(-1), + partitionGeneration: *NewAtomicInt(-2), + referenceCount: *NewAtomicInt(0), + failures: *NewAtomicInt(0), + active: *NewAtomicBool(true), + partitionChanged: *NewAtomicBool(false), + + supportsFloat: *NewAtomicBool(nv.supportsFloat), + supportsBatchIndex: *NewAtomicBool(nv.supportsBatchIndex), + supportsReplicasAll: *NewAtomicBool(nv.supportsReplicasAll), + supportsGeo: *NewAtomicBool(nv.supportsGeo), + supportsPeers: *NewAtomicBool(nv.supportsPeers), + } + + newNode.aliases.Store(nv.aliases) + + return newNode +} + +// Refresh requests current status from server node, and updates node with the result. 
+func (nd *Node) Refresh(peers *peers) error { + if !nd.active.Get() { + return nil + } + + // Close idleConnections + defer nd.dropIdleConnections() + + nd.referenceCount.Set(0) + + if peers.usePeers.Get() { + infoMap, err := nd.RequestInfo("node", "peers-generation", "partition-generation") + if err != nil { + nd.refreshFailed(err) + return err + } + + if err := nd.verifyNodeName(infoMap); err != nil { + nd.refreshFailed(err) + return err + } + + if err := nd.verifyPeersGeneration(infoMap, peers); err != nil { + nd.refreshFailed(err) + return err + } + + if err := nd.verifyPartitionGeneration(infoMap); err != nil { + nd.refreshFailed(err) + return err + } + } else { + commands := []string{"node", "partition-generation", nd.cluster.clientPolicy.serviceString()} + + infoMap, err := nd.RequestInfo(commands...) + if err != nil { + nd.refreshFailed(err) + return err + } + + if err := nd.verifyNodeName(infoMap); err != nil { + nd.refreshFailed(err) + return err + } + + if err = nd.verifyPartitionGeneration(infoMap); err != nil { + nd.refreshFailed(err) + return err + } + + if err = nd.addFriends(infoMap, peers); err != nil { + nd.refreshFailed(err) + return err + } + } + nd.failures.Set(0) + peers.refreshCount.IncrementAndGet() + nd.referenceCount.IncrementAndGet() + + return nil +} + +func (nd *Node) verifyNodeName(infoMap map[string]string) error { + infoName, exists := infoMap["node"] + + if !exists || len(infoName) == 0 { + return NewAerospikeError(INVALID_NODE_ERROR, "Node name is empty") + } + + if !(nd.name == infoName) { + // Set node to inactive immediately. + nd.active.Set(false) + return NewAerospikeError(INVALID_NODE_ERROR, "Node name has changed. 
Old="+nd.name+" New="+infoName) + } + return nil +} + +func (nd *Node) verifyPeersGeneration(infoMap map[string]string, peers *peers) error { + genString := infoMap["peers-generation"] + if len(genString) == 0 { + return NewAerospikeError(PARSE_ERROR, "peers-generation is empty") + } + + gen, err := strconv.Atoi(genString) + if err != nil { + return NewAerospikeError(PARSE_ERROR, "peers-generation is not a number: "+genString) + } + + peers.genChanged.Or(nd.peersGeneration.Get() != gen) + return nil +} + +func (nd *Node) verifyPartitionGeneration(infoMap map[string]string) error { + genString := infoMap["partition-generation"] + + if len(genString) == 0 { + return NewAerospikeError(PARSE_ERROR, "partition-generation is empty") + } + + gen, err := strconv.Atoi(genString) + if err != nil { + return NewAerospikeError(PARSE_ERROR, "partition-generation is not a number:"+genString) + } + + if nd.partitionGeneration.Get() != gen { + nd.partitionChanged.Set(true) + } + return nil +} + +func (nd *Node) addFriends(infoMap map[string]string, peers *peers) error { + friendString, exists := infoMap[nd.cluster.clientPolicy.serviceString()] + + if !exists || len(friendString) == 0 { + nd.peersCount.Set(0) + return nil + } + + friendNames := strings.Split(friendString, ";") + nd.peersCount.Set(len(friendNames)) + + for _, friend := range friendNames { + friendInfo := strings.Split(friend, ":") + + if len(friendInfo) != 2 { + Logger.Error("Node info from asinfo:services is malformed. 
Expected HOST:PORT, but got `%s`", friend) + continue + } + + hostName := friendInfo[0] + port, _ := strconv.Atoi(friendInfo[1]) + + if nd.cluster.clientPolicy.IpMap != nil { + if alternativeHost, ok := nd.cluster.clientPolicy.IpMap[hostName]; ok { + hostName = alternativeHost + } + } + + host := NewHost(hostName, port) + node := nd.cluster.findAlias(host) + + if node != nil { + node.referenceCount.IncrementAndGet() + } else { + if !peers.hostExists(*host) { + nd.prepareFriend(host, peers) + } + } + } + + return nil +} + +func (nd *Node) prepareFriend(host *Host, peers *peers) bool { + nv := &nodeValidator{} + if err := nv.validateNode(nd.cluster, host); err != nil { + Logger.Warn("Adding node `%s` failed: ", host, err) + return false + } + + node := peers.nodeByName(nv.name) + + if node != nil { + // Duplicate node name found. This usually occurs when the server + // services list contains both internal and external IP addresses + // for the same node. + peers.addHost(*host) + node.addAlias(host) + return true + } + + // Check for duplicate nodes in cluster. + node = nd.cluster.nodesMap.Get().(map[string]*Node)[nv.name] + + if node != nil { + peers.addHost(*host) + node.addAlias(host) + node.referenceCount.IncrementAndGet() + nd.cluster.addAlias(host, node) + return true + } + + node = nd.cluster.createNode(nv) + peers.addHost(*host) + peers.addNode(nv.name, node) + return true +} + +func (nd *Node) refreshPeers(peers *peers) { + // Do not refresh peers when node connection has already failed during this cluster tend iteration. 
+ if nd.failures.Get() > 0 || !nd.active.Get() { + return + } + + peerParser, err := parsePeers(nd.cluster, nd) + if err != nil { + Logger.Debug("Parsing peers failed: %s", err) + nd.refreshFailed(err) + return + } + + peers.appendPeers(peerParser.peers) + nd.peersGeneration.Set(int(peerParser.generation())) + nd.peersCount.Set(len(peers.peers())) + peers.refreshCount.IncrementAndGet() +} + +func (nd *Node) refreshPartitions(peers *peers) { + // Do not refresh peers when node connection has already failed during this cluster tend iteration. + // Also, avoid "split cluster" case where this node thinks it's a 1-node cluster. + // Unchecked, such a node can dominate the partition map and cause all other + // nodes to be dropped. + if nd.failures.Get() > 0 || !nd.active.Get() || (nd.peersCount.Get() == 0 && peers.refreshCount.Get() > 1) { + return + } + + parser, err := newPartitionParser(nd, _PARTITIONS, nd.cluster.clientPolicy.RequestProleReplicas) + if err != nil { + nd.refreshFailed(err) + return + } + + if parser.generation != nd.partitionGeneration.Get() { + Logger.Info("Node %s partition generation %d changed to %d", nd.GetName(), nd.partitionGeneration.Get(), parser.getGeneration()) + nd.partitionMap = parser.getPartitionMap() + nd.partitionChanged.Set(true) + nd.partitionGeneration.Set(parser.getGeneration()) + } +} + +func (nd *Node) refreshFailed(e error) { + nd.failures.IncrementAndGet() + + // Only log message if cluster is still active. + if nd.cluster.IsConnected() { + Logger.Warn("Node `%s` refresh failed: `%s`", nd, e) + } +} + +// dropIdleConnections picks a connection from the head of the connection pool queue +// if that connection is idle, it drops it and takes the next one until it picks +// a fresh connection or exhaust the queue. +func (nd *Node) dropIdleConnections() { + nd.connections.DropIdle() +} + +// GetConnection gets a connection to the node. 
+// If no pooled connection is available, a new connection will be created, unless +// ClientPolicy.MaxQueueSize number of connections are already created. +// This method will retry to retrieve a connection in case the connection pool +// is empty, until timeout is reached. +func (nd *Node) GetConnection(timeout time.Duration) (conn *Connection, err error) { + deadline := time.Now().Add(timeout) + if timeout == 0 { + deadline = time.Now().Add(time.Second) + } + +CL: + // try to acquire a connection; if the connection pool is empty, retry until + // timeout occures. If no timeout is set, will retry indefinitely. + conn, err = nd.getConnection(timeout) + if err != nil { + if err == ErrConnectionPoolEmpty && nd.IsActive() && time.Now().Before(deadline) { + // give the scheduler time to breath; affects latency minimally, but throughput drastically + time.Sleep(time.Microsecond) + goto CL + } + + return nil, err + } + + return conn, nil +} + +// getConnection gets a connection to the node. +// If no pooled connection is available, a new connection will be created. +// This method does not include logic to retry in case the connection pool is empty +func (nd *Node) getConnection(timeout time.Duration) (conn *Connection, err error) { + return nd.getConnectionWithHint(timeout, 0) +} + +// getConnectionWithHint gets a connection to the node. +// If no pooled connection is available, a new connection will be created. 
+// This method does not include logic to retry in case the connection pool is empty +func (nd *Node) getConnectionWithHint(timeout time.Duration, hint byte) (conn *Connection, err error) { + // try to get a valid connection from the connection pool + for t := nd.connections.Poll(hint); t != nil; t = nd.connections.Poll(hint) { + conn = t //.(*Connection) + if conn.IsConnected() { + break + } + conn.Close() + conn = nil + } + + if conn == nil { + cc := nd.connectionCount.IncrementAndGet() + + // if connection count is limited and enough connections are already created, don't create a new one + if nd.cluster.clientPolicy.LimitConnectionsToQueueSize && cc > nd.cluster.clientPolicy.ConnectionQueueSize { + nd.connectionCount.DecrementAndGet() + return nil, ErrConnectionPoolEmpty + } + + if conn, err = NewSecureConnection(&nd.cluster.clientPolicy, nd.host); err != nil { + nd.connectionCount.DecrementAndGet() + return nil, err + } + conn.node = nd + + // need to authenticate + if err = conn.Authenticate(nd.cluster.user, nd.cluster.Password()); err != nil { + // Socket not authenticated. Do not put back into pool. + conn.Close() + return nil, err + } + } + + if err = conn.SetTimeout(timeout); err != nil { + // Do not put back into pool. + conn.Close() + return nil, err + } + + conn.setIdleTimeout(nd.cluster.clientPolicy.IdleTimeout) + conn.refresh() + + return conn, nil +} + +// PutConnection puts back a connection to the pool. +// If connection pool is full, the connection will be +// closed and discarded. +func (nd *Node) putConnectionWithHint(conn *Connection, hint byte) { + conn.refresh() + if !nd.active.Get() || !nd.connections.Offer(conn, hint) { + conn.Close() + } +} + +// PutConnection puts back a connection to the pool. +// If connection pool is full, the connection will be +// closed and discarded. +func (nd *Node) PutConnection(conn *Connection) { + nd.putConnectionWithHint(conn, 0) +} + +// InvalidateConnection closes and discards a connection from the pool. 
+func (nd *Node) InvalidateConnection(conn *Connection) { + conn.Close() +} + +// GetHost retrieves host for the node. +func (nd *Node) GetHost() *Host { + return nd.host +} + +// IsActive Checks if the node is active. +func (nd *Node) IsActive() bool { + return nd != nil && nd.active.Get() && nd.partitionGeneration.Get() >= -1 +} + +// GetName returns node name. +func (nd *Node) GetName() string { + return nd.name +} + +// GetAliases returns node aliases. +func (nd *Node) GetAliases() []*Host { + return nd.aliases.Load().([]*Host) +} + +// Sets node aliases +func (nd *Node) setAliases(aliases []*Host) { + nd.aliases.Store(aliases) +} + +// AddAlias adds an alias for the node +func (nd *Node) addAlias(aliasToAdd *Host) { + // Aliases are only referenced in the cluster tend goroutine, + // so synchronization is not necessary. + aliases := nd.GetAliases() + if aliases == nil { + aliases = []*Host{} + } + + aliases = append(aliases, aliasToAdd) + nd.setAliases(aliases) +} + +// Close marks node as inactive and closes all of its pooled connections. +func (nd *Node) Close() { + nd.active.Set(false) + nd.closeConnections() +} + +// String implements stringer interface +func (nd *Node) String() string { + return nd.name + " " + nd.host.String() +} + +func (nd *Node) closeConnections() { + for conn := nd.connections.Poll(0); conn != nil; conn = nd.connections.Poll(0) { + // conn.(*Connection).Close() + conn.Close() + } +} + +// Equals compares equality of two nodes based on their names. 
+func (nd *Node) Equals(other *Node) bool { + return nd != nil && other != nil && (nd == other || nd.name == other.name) +} + +// MigrationInProgress determines if the node is participating in a data migration +func (nd *Node) MigrationInProgress() (bool, error) { + values, err := RequestNodeStats(nd) + if err != nil { + return false, err + } + + // if the migration_progress_send exists and is not `0`, then migration is in progress + if migration, exists := values["migrate_progress_send"]; exists && migration != "0" { + return true, nil + } + + // migration not in progress + return false, nil +} + +// WaitUntillMigrationIsFinished will block until migration operations are finished. +func (nd *Node) WaitUntillMigrationIsFinished(timeout time.Duration) (err error) { + if timeout <= 0 { + timeout = _NO_TIMEOUT + } + done := make(chan error) + + go func() { + // this function is guaranteed to return after timeout + // no go routines will be leaked + for { + if res, err := nd.MigrationInProgress(); err != nil || !res { + done <- err + return + } + } + }() + + dealine := time.After(timeout) + select { + case <-dealine: + return NewAerospikeError(TIMEOUT) + case err = <-done: + return err + } +} + +// initTendConn sets up a connection to be used for info requests. +// The same connection will be used for tend. +func (nd *Node) initTendConn(timeout time.Duration) error { + if nd.tendConn == nil || !nd.tendConn.IsConnected() { + // Tend connection required a long timeout + tendConn, err := nd.GetConnection(timeout) + if err != nil { + return err + } + + nd.tendConn = tendConn + } + + // Set timeout for tend conn + return nd.tendConn.SetTimeout(timeout) +} + +// RequestInfo gets info values by name from the specified database server node. 
+func (nd *Node) RequestInfo(name ...string) (map[string]string, error) { + nd.tendConnLock.Lock() + defer nd.tendConnLock.Unlock() + + if err := nd.initTendConn(nd.cluster.clientPolicy.Timeout); err != nil { + return nil, err + } + + response, err := RequestInfo(nd.tendConn, name...) + if err != nil { + nd.tendConn.Close() + return nil, err + } + return response, nil +} + +// requestRawInfo gets info values by name from the specified database server node. +// It won't parse the results. +func (nd *Node) requestRawInfo(name ...string) (*info, error) { + nd.tendConnLock.Lock() + defer nd.tendConnLock.Unlock() + + if err := nd.initTendConn(nd.cluster.clientPolicy.Timeout); err != nil { + return nil, err + } + + response, err := newInfo(nd.tendConn, name...) + if err != nil { + nd.tendConn.Close() + return nil, err + } + return response, nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/node_error.go b/vendor/github.com/aerospike/aerospike-client-go/node_error.go new file mode 100644 index 00000000000..3b2e11daabe --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/node_error.go @@ -0,0 +1,46 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . "github.com/aerospike/aerospike-client-go/types" +) + +// NodeError is a type to encapsulate the node that the error occurred in. 
+type NodeError struct { + error + + node *Node +} + +func newNodeError(node *Node, err error) *NodeError { + return &NodeError{ + error: err, + node: node, + } +} + +func newAerospikeNodeError(node *Node, code ResultCode, messages ...string) *NodeError { + return &NodeError{ + error: NewAerospikeError(code, messages...), + node: node, + } +} + +// Node returns the node where the error occurred. +func (ne *NodeError) Node() *Node { return ne.node } + +// Err returns the error +func (ne *NodeError) Err() error { return ne.error } diff --git a/vendor/github.com/aerospike/aerospike-client-go/node_validator.go b/vendor/github.com/aerospike/aerospike-client-go/node_validator.go new file mode 100644 index 00000000000..96a19b4ec85 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/node_validator.go @@ -0,0 +1,211 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "bytes" + "fmt" + "net" + "strings" + "sync" + + . "github.com/aerospike/aerospike-client-go/logger" + . 
"github.com/aerospike/aerospike-client-go/types" +) + +type nodesToAddT struct { + nodesToAdd map[string]*Node + mutex sync.RWMutex +} + +func (nta *nodesToAddT) addNodeIfNotExists(ndv *nodeValidator, cluster *Cluster) bool { + nta.mutex.Lock() + defer nta.mutex.Unlock() + + _, exists := nta.nodesToAdd[ndv.name] + if !exists { + // found a new node + node := cluster.createNode(ndv) + nta.nodesToAdd[ndv.name] = node + } + return exists +} + +// Validates a Database server node +type nodeValidator struct { + name string + aliases []*Host + primaryHost *Host + + supportsFloat, supportsBatchIndex, supportsReplicasAll, supportsGeo, supportsPeers bool +} + +func (ndv *nodeValidator) seedNodes(cluster *Cluster, host *Host, nodesToAdd *nodesToAddT) error { + if err := ndv.setAliases(host); err != nil { + return err + } + + found := false + var resultErr error + for _, alias := range ndv.aliases { + if resultErr = ndv.validateAlias(cluster, alias); resultErr != nil { + Logger.Debug("Alias %s failed: %s", alias, resultErr) + continue + } + + found = true + nodesToAdd.addNodeIfNotExists(ndv, cluster) + } + + if !found { + return resultErr + } + return nil +} + +func (ndv *nodeValidator) validateNode(cluster *Cluster, host *Host) error { + if clusterNodes := cluster.GetNodes(); cluster.clientPolicy.IgnoreOtherSubnetAliases && len(clusterNodes) > 0 { + masterHostname := clusterNodes[0].host.Name + ip, ipnet, err := net.ParseCIDR(masterHostname + "/24") + if err != nil { + Logger.Error(err.Error()) + return NewAerospikeError(NO_AVAILABLE_CONNECTIONS_TO_NODE, "Failed parsing hostname...") + } + + stop := ip.Mask(ipnet.Mask) + stop[3] += 255 + if bytes.Compare(net.ParseIP(host.Name).To4(), ip.Mask(ipnet.Mask).To4()) >= 0 && bytes.Compare(net.ParseIP(host.Name).To4(), stop.To4()) < 0 { + } else { + return NewAerospikeError(NO_AVAILABLE_CONNECTIONS_TO_NODE, "Ignored hostname from other subnet...") + } + } + + if err := ndv.setAliases(host); err != nil { + return err + } + + var 
resultErr error + for _, alias := range ndv.aliases { + if err := ndv.validateAlias(cluster, alias); err != nil { + resultErr = err + Logger.Debug("Aliases %s failed: %s", alias, err) + continue + } + return nil + } + + return resultErr +} + +func (ndv *nodeValidator) setAliases(host *Host) error { + // IP addresses do not need a lookup + ip := net.ParseIP(host.Name) + if ip != nil { + aliases := make([]*Host, 1) + aliases[0] = NewHost(host.Name, host.Port) + aliases[0].TLSName = host.TLSName + ndv.aliases = aliases + } else { + addresses, err := net.LookupHost(host.Name) + if err != nil { + Logger.Error("Host lookup failed with error: %s", err.Error()) + return err + } + aliases := make([]*Host, len(addresses)) + for idx, addr := range addresses { + aliases[idx] = NewHost(addr, host.Port) + aliases[idx].TLSName = host.TLSName + } + ndv.aliases = aliases + } + Logger.Debug("Node Validator has %d nodes and they are: %v", len(ndv.aliases), ndv.aliases) + return nil +} + +func (ndv *nodeValidator) validateAlias(cluster *Cluster, alias *Host) error { + conn, err := NewSecureConnection(&cluster.clientPolicy, alias) + if err != nil { + return err + } + defer conn.Close() + + // need to authenticate + if err := conn.Authenticate(cluster.user, cluster.Password()); err != nil { + return err + } + + // check to make sure we have actually connected + info, err := RequestInfo(conn, "build") + if err != nil { + return err + } + if _, exists := info["ERROR:80:not authenticated"]; exists { + return NewAerospikeError(NOT_AUTHENTICATED) + } + + hasClusterName := len(cluster.clientPolicy.ClusterName) > 0 + + var infoKeys []string + if hasClusterName { + infoKeys = []string{"node", "features", "cluster-name"} + } else { + infoKeys = []string{"node", "features"} + } + infoMap, err := RequestInfo(conn, infoKeys...) 
+ if err != nil { + return err + } + + nodeName, exists := infoMap["node"] + if !exists { + return NewAerospikeError(INVALID_NODE_ERROR) + } + + if hasClusterName { + id := infoMap["cluster-name"] + + if len(id) == 0 || id != cluster.clientPolicy.ClusterName { + return NewAerospikeError(CLUSTER_NAME_MISMATCH_ERROR, fmt.Sprintf("Node %s (%s) expected cluster name `%s` but received `%s`", nodeName, alias.String(), cluster.clientPolicy.ClusterName, id)) + } + } + + // set features + if features, exists := infoMap["features"]; exists { + ndv.setFeatures(features) + } + + ndv.name = nodeName + ndv.primaryHost = alias + + return nil +} + +func (ndv *nodeValidator) setFeatures(features string) { + featureList := strings.Split(features, ";") + for i := range featureList { + switch featureList[i] { + case "float": + ndv.supportsFloat = true + case "batch-index": + ndv.supportsBatchIndex = true + case "replicas-all": + ndv.supportsReplicasAll = true + case "geo": + ndv.supportsGeo = true + case "peers": + ndv.supportsPeers = true + } + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/operate_command.go b/vendor/github.com/aerospike/aerospike-client-go/operate_command.go new file mode 100644 index 00000000000..b4f463200ce --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/operate_command.go @@ -0,0 +1,42 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aerospike + +type operateCommand struct { + readCommand + + policy *WritePolicy + operations []*Operation +} + +func newOperateCommand(cluster *Cluster, policy *WritePolicy, key *Key, operations []*Operation) operateCommand { + return operateCommand{ + readCommand: newReadCommand(cluster, &policy.BasePolicy, key, nil), + policy: policy, + operations: operations, + } +} + +func (cmd *operateCommand) writeBuffer(ifc command) error { + return cmd.setOperate(cmd.policy, cmd.key, cmd.operations) +} + +func (cmd *operateCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getMasterNode(&cmd.partition) +} + +func (cmd *operateCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/operation.go b/vendor/github.com/aerospike/aerospike-client-go/operation.go new file mode 100644 index 00000000000..cc5a4b88de3 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/operation.go @@ -0,0 +1,119 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +type operation interface { + write(cmd BufferEx) (int, error) +} + +// OperationType determines operation type +type OperationType *struct{ op byte } +type OperationSubType *int + +// Valid OperationType values that can be used to create custom Operations. +// The names are self-explanatory. 
+var ( + READ OperationType = &struct{ op byte }{1} + // READ_HEADER *OperationType = &struct{op: 1 } + + WRITE OperationType = &struct{ op byte }{2} + CDT_READ OperationType = &struct{ op byte }{3} + CDT_MODIFY OperationType = &struct{ op byte }{4} + MAP_READ OperationType = &struct{ op byte }{3} + MAP_MODIFY OperationType = &struct{ op byte }{4} + ADD OperationType = &struct{ op byte }{5} + APPEND OperationType = &struct{ op byte }{9} + PREPEND OperationType = &struct{ op byte }{10} + TOUCH OperationType = &struct{ op byte }{11} +) + +// Operation contains operation definition. +// This struct is used in client's operate() method. +type Operation struct { + + // OpType determines type of operation. + opType OperationType + // used in CDT commands + opSubType OperationSubType + + encoder func(*Operation, BufferEx) (int, error) + + // binName (Optional) determines the name of bin used in operation. + binName string + + // binValue (Optional) determines bin value used in operation. + binValue Value + + // will be true ONLY for GetHeader() operation + headerOnly bool + + // reused determines if the operation is cached. If so, it will cache the + // internal bytes in binValue field and remove the encoder for maximum performance + used bool +} + +// cache uses the encoder and caches the packed operation for further use. +func (op *Operation) cache() error { + packer := newPacker() + + if _, err := op.encoder(op, packer); err != nil { + return err + } + + op.binValue = BytesValue(packer.Bytes()) + op.encoder = nil // do not encode anymore; just use the cache + op.used = false // do not encode anymore; just use the cache + return nil +} + +// GetOpForBin creates read bin database operation. +func GetOpForBin(binName string) *Operation { + return &Operation{opType: READ, binName: binName, binValue: NewNullValue()} +} + +// GetOp creates read all record bins database operation. 
+func GetOp() *Operation { + return &Operation{opType: READ, binValue: NewNullValue()} +} + +// GetHeaderOp creates read record header database operation. +func GetHeaderOp() *Operation { + return &Operation{opType: READ, headerOnly: true, binValue: NewNullValue()} +} + +// PutOp creates set database operation. +func PutOp(bin *Bin) *Operation { + return &Operation{opType: WRITE, binName: bin.Name, binValue: bin.Value} +} + +// AppendOp creates string append database operation. +func AppendOp(bin *Bin) *Operation { + return &Operation{opType: APPEND, binName: bin.Name, binValue: bin.Value} +} + +// PrependOp creates string prepend database operation. +func PrependOp(bin *Bin) *Operation { + return &Operation{opType: PREPEND, binName: bin.Name, binValue: bin.Value} +} + +// AddOp creates integer add database operation. +func AddOp(bin *Bin) *Operation { + return &Operation{opType: ADD, binName: bin.Name, binValue: bin.Value} +} + +// TouchOp creates touch database operation. +func TouchOp() *Operation { + return &Operation{opType: TOUCH, binValue: NewNullValue()} +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/packer.go b/vendor/github.com/aerospike/aerospike-client-go/packer.go new file mode 100644 index 00000000000..e2215cf0332 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/packer.go @@ -0,0 +1,662 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aerospike + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "reflect" + "time" + + ParticleType "github.com/aerospike/aerospike-client-go/types/particle_type" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +var __packObjectReflect func(BufferEx, interface{}, bool) (int, error) + +func __PackIfcList(cmd BufferEx, list []interface{}) (int, error) { + size := 0 + n, err := __PackArrayBegin(cmd, len(list)) + if err != nil { + return n, err + } + size += n + + for i := range list { + n, err := __PackObject(cmd, list[i], false) + if err != nil { + return 0, err + } + size += n + } + + return size, err +} + +// PackList packs any slice that implement the ListIter interface +func PackList(cmd BufferEx, list ListIter) (int, error) { + return __PackList(cmd, list) +} + +func __PackList(cmd BufferEx, list ListIter) (int, error) { + size := 0 + n, err := __PackArrayBegin(cmd, list.Len()) + if err != nil { + return n, err + } + size += n + + n, err = list.PackList(cmd) + return size + n, err +} + +func __PackValueArray(cmd BufferEx, list ValueArray) (int, error) { + size := 0 + n, err := __PackArrayBegin(cmd, len(list)) + if err != nil { + return n, err + } + size += n + + for i := range list { + n, err := list[i].pack(cmd) + if err != nil { + return 0, err + } + size += n + } + + return size, err +} + +func __PackArrayBegin(cmd BufferEx, size int) (int, error) { + if size < 16 { + return __PackAByte(cmd, 0x90|byte(size)) + } else if size <= math.MaxUint16 { + return __PackShort(cmd, 0xdc, int16(size)) + } else { + return __PackInt(cmd, 0xdd, int32(size)) + } +} + +func __PackIfcMap(cmd BufferEx, theMap map[interface{}]interface{}) (int, error) { + size := 0 + n, err := __PackMapBegin(cmd, len(theMap)) + if err != nil { + return n, err + } + size += n + + for k, v := range theMap { + n, err := __PackObject(cmd, k, true) + if err != nil { + return 0, err + } + size += n + n, err = __PackObject(cmd, v, false) + if err != nil { + return 
0, err + } + size += n + } + + return size, err +} + +// PackJson packs json data +func PackJson(cmd BufferEx, theMap map[string]interface{}) (int, error) { + return __PackJsonMap(cmd, theMap) +} + +func __PackJsonMap(cmd BufferEx, theMap map[string]interface{}) (int, error) { + size := 0 + n, err := __PackMapBegin(cmd, len(theMap)) + if err != nil { + return n, err + } + size += n + + for k, v := range theMap { + n, err := __PackString(cmd, k) + if err != nil { + return 0, err + } + size += n + n, err = __PackObject(cmd, v, false) + if err != nil { + return 0, err + } + size += n + } + + return size, err +} + +// PackMap packs any map that implements the MapIter interface +func PackMap(cmd BufferEx, theMap MapIter) (int, error) { + return __PackMap(cmd, theMap) +} + +func __PackMap(cmd BufferEx, theMap MapIter) (int, error) { + size := 0 + n, err := __PackMapBegin(cmd, theMap.Len()) + if err != nil { + return n, err + } + size += n + + n, err = theMap.PackMap(cmd) + return size + n, err +} + +func __PackMapBegin(cmd BufferEx, size int) (int, error) { + if size < 16 { + return __PackAByte(cmd, 0x80|byte(size)) + } else if size <= math.MaxUint16 { + return __PackShort(cmd, 0xde, int16(size)) + } else { + return __PackInt(cmd, 0xdf, int32(size)) + } +} + +// PackBytes backs a byte array +func PackBytes(cmd BufferEx, b []byte) (int, error) { + return __PackBytes(cmd, b) +} + +func __PackBytes(cmd BufferEx, b []byte) (int, error) { + size := 0 + n, err := __PackByteArrayBegin(cmd, len(b)+1) + if err != nil { + return n, err + } + size += n + + n, err = __PackAByte(cmd, ParticleType.BLOB) + if err != nil { + return size + n, err + } + size += n + + n, err = __PackByteArray(cmd, b) + if err != nil { + return size + n, err + } + size += n + + return size, nil +} + +func __PackByteArrayBegin(cmd BufferEx, length int) (int, error) { + if length < 32 { + return __PackAByte(cmd, 0xa0|byte(length)) + } else if length < 65536 { + return __PackShort(cmd, 0xda, int16(length)) + } 
else { + return __PackInt(cmd, 0xdb, int32(length)) + } +} + +func __PackObject(cmd BufferEx, obj interface{}, mapKey bool) (int, error) { + switch v := obj.(type) { + case Value: + return v.pack(cmd) + case []Value: + return ValueArray(v).pack(cmd) + case string: + return __PackString(cmd, v) + case []byte: + return __PackBytes(cmd, obj.([]byte)) + case int8: + return __PackAInt(cmd, int(v)) + case uint8: + return __PackAInt(cmd, int(v)) + case int16: + return __PackAInt(cmd, int(v)) + case uint16: + return __PackAInt(cmd, int(v)) + case int32: + return __PackAInt(cmd, int(v)) + case uint32: + return __PackAInt(cmd, int(v)) + case int: + if Buffer.Arch32Bits { + return __PackAInt(cmd, v) + } + return __PackAInt64(cmd, int64(v)) + case uint: + if Buffer.Arch32Bits { + return __PackAInt(cmd, int(v)) + } + return __PackAUInt64(cmd, uint64(v)) + case int64: + return __PackAInt64(cmd, v) + case uint64: + return __PackAUInt64(cmd, v) + case time.Time: + return __PackAInt64(cmd, v.UnixNano()) + case nil: + return __PackNil(cmd) + case bool: + return __PackBool(cmd, v) + case float32: + return __PackFloat32(cmd, v) + case float64: + return __PackFloat64(cmd, v) + case struct{}: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. Value: %#v", v)) + } + return __PackIfcMap(cmd, map[interface{}]interface{}{}) + case []interface{}: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. Value: %#v", v)) + } + return __PackIfcList(cmd, v) + case map[interface{}]interface{}: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. Value: %#v", v)) + } + return __PackIfcMap(cmd, v) + case ListIter: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. 
Value: %#v", v)) + } + return __PackList(cmd, obj.(ListIter)) + case MapIter: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. Value: %#v", v)) + } + return __PackMap(cmd, obj.(MapIter)) + } + + // try to see if the object is convertible to a concrete value. + // This will be faster and much more memory efficient than reflection. + if v := tryConcreteValue(obj); v != nil { + return v.pack(cmd) + } + + if __packObjectReflect != nil { + return __packObjectReflect(cmd, obj, mapKey) + } + + panic(fmt.Sprintf("Type `%v (%s)` not supported to pack. ", obj, reflect.TypeOf(obj).String())) +} + +func __PackAUInt64(cmd BufferEx, val uint64) (int, error) { + return __PackUInt64(cmd, val) +} + +func __PackAInt64(cmd BufferEx, val int64) (int, error) { + if val >= 0 { + if val < 128 { + return __PackAByte(cmd, byte(val)) + } + + if val <= math.MaxUint8 { + return __PackByte(cmd, 0xcc, byte(val)) + } + + if val <= math.MaxUint16 { + return __PackShort(cmd, 0xcd, int16(val)) + } + + if val <= math.MaxUint32 { + return __PackInt(cmd, 0xce, int32(val)) + } + return __PackInt64(cmd, 0xd3, val) + } else { + if val >= -32 { + return __PackAByte(cmd, 0xe0|(byte(val)+32)) + } + + if val >= math.MinInt8 { + return __PackByte(cmd, 0xd0, byte(val)) + } + + if val >= math.MinInt16 { + return __PackShort(cmd, 0xd1, int16(val)) + } + + if val >= math.MinInt32 { + return __PackInt(cmd, 0xd2, int32(val)) + } + return __PackInt64(cmd, 0xd3, val) + } +} + +// PackInt64 packs an int64 +func PackInt64(cmd BufferEx, val int64) (int, error) { + return __PackAInt64(cmd, val) +} + +func __PackAInt(cmd BufferEx, val int) (int, error) { + return __PackAInt64(cmd, int64(val)) +} + +// PackString packs a string +func PackString(cmd BufferEx, val string) (int, error) { + return __PackString(cmd, val) +} + +func __PackString(cmd BufferEx, val string) (int, error) { + size := 0 + slen := len(val) + 1 + n, err := 
__PackByteArrayBegin(cmd, slen) + if err != nil { + return n, err + } + size += n + + if cmd != nil { + n, err = 1, cmd.WriteByte(byte(ParticleType.STRING)) + if err != nil { + return size + n, err + } + size += n + + n, err = cmd.WriteString(val) + if err != nil { + return size + n, err + } + size += n + } else { + size += 1 + len(val) + } + + return size, nil +} + +func __PackGeoJson(cmd BufferEx, val string) (int, error) { + size := 0 + slen := len(val) + 1 + n, err := __PackByteArrayBegin(cmd, slen) + if err != nil { + return n, err + } + size += n + + if cmd != nil { + n, err = 1, cmd.WriteByte(byte(ParticleType.GEOJSON)) + if err != nil { + return size + n, err + } + size += n + + n, err = cmd.WriteString(val) + if err != nil { + return size + n, err + } + size += n + } else { + size += 1 + len(val) + } + + return size, nil +} + +func __PackByteArray(cmd BufferEx, src []byte) (int, error) { + if cmd != nil { + return cmd.Write(src) + } + return len(src), nil +} + +func __PackInt64(cmd BufferEx, valType int, val int64) (int, error) { + if cmd != nil { + size, err := 1, cmd.WriteByte(byte(valType)) + if err != nil { + return size, err + } + + n, err := cmd.WriteInt64(val) + return size + n, err + } + return 1 + 8, nil +} + +// PackUInt64 packs a uint64 +func PackUInt64(cmd BufferEx, val uint64) (int, error) { + return __PackUInt64(cmd, val) +} + +func __PackUInt64(cmd BufferEx, val uint64) (int, error) { + if cmd != nil { + size, err := 1, cmd.WriteByte(byte(0xcf)) + if err != nil { + return size, err + } + + n, err := cmd.WriteInt64(int64(val)) + return size + n, err + } + return 1 + 8, nil +} + +func __PackInt(cmd BufferEx, valType int, val int32) (int, error) { + if cmd != nil { + size, err := 1, cmd.WriteByte(byte(valType)) + if err != nil { + return size, err + } + n, err := cmd.WriteInt32(val) + return size + n, err + } + return 1 + 4, nil +} + +func __PackShort(cmd BufferEx, valType int, val int16) (int, error) { + if cmd != nil { + size, err := 1, 
cmd.WriteByte(byte(valType)) + if err != nil { + return size, err + } + + n, err := cmd.WriteInt16(val) + return size + n, err + } + return 1 + 2, nil +} + +// This method is not compatible with MsgPack specs and is only used by aerospike client<->server +// for wire transfer only +func __PackShortRaw(cmd BufferEx, val int16) (int, error) { + if cmd != nil { + return cmd.WriteInt16(val) + } + return 2, nil +} + +func __PackByte(cmd BufferEx, valType int, val byte) (int, error) { + if cmd != nil { + size := 0 + n, err := 1, cmd.WriteByte(byte(valType)) + if err != nil { + return n, err + } + size += n + + n, err = 1, cmd.WriteByte(val) + if err != nil { + return size + n, err + } + size += n + + return size, nil + } + return 1 + 1, nil +} + +// Pack nil packs a nil value +func PackNil(cmd BufferEx) (int, error) { + return __PackNil(cmd) +} + +func __PackNil(cmd BufferEx) (int, error) { + if cmd != nil { + return 1, cmd.WriteByte(0xc0) + } + return 1, nil +} + +// Pack bool packs a bool value +func PackBool(cmd BufferEx, val bool) (int, error) { + return __PackBool(cmd, val) +} + +func __PackBool(cmd BufferEx, val bool) (int, error) { + if cmd != nil { + if val { + return 1, cmd.WriteByte(0xc3) + } + return 1, cmd.WriteByte(0xc2) + } + return 1, nil +} + +// PackFloat32 packs float32 value +func PackFloat32(cmd BufferEx, val float32) (int, error) { + return __PackFloat32(cmd, val) +} + +func __PackFloat32(cmd BufferEx, val float32) (int, error) { + if cmd != nil { + size := 0 + n, err := 1, cmd.WriteByte(0xca) + if err != nil { + return n, err + } + size += n + n, err = cmd.WriteFloat32(val) + return size + n, err + } + return 1 + 4, nil +} + +// PackFloat64 packs float64 value +func PackFloat64(cmd BufferEx, val float64) (int, error) { + return __PackFloat64(cmd, val) +} + +func __PackFloat64(cmd BufferEx, val float64) (int, error) { + if cmd != nil { + size := 0 + n, err := 1, cmd.WriteByte(0xcb) + if err != nil { + return n, err + } + size += n + n, err = 
cmd.WriteFloat64(val) + return size + n, err + } + return 1 + 8, nil +} + +func __PackAByte(cmd BufferEx, val byte) (int, error) { + if cmd != nil { + return 1, cmd.WriteByte(val) + } + return 1, nil +} + +// packer implements a buffered packer +type packer struct { + bytes.Buffer + tempBuffer [8]byte +} + +func newPacker() *packer { + return &packer{} +} + +// Int64ToBytes converts an int64 into slice of Bytes. +func (vb *packer) WriteInt64(num int64) (int, error) { + return vb.WriteUint64(uint64(num)) +} + +// Uint64ToBytes converts an uint64 into slice of Bytes. +func (vb *packer) WriteUint64(num uint64) (int, error) { + binary.BigEndian.PutUint64(vb.tempBuffer[:8], num) + vb.Write(vb.tempBuffer[:8]) + return 8, nil +} + +// Int32ToBytes converts an int32 to a byte slice of size 4 +func (vb *packer) WriteInt32(num int32) (int, error) { + return vb.WriteUint32(uint32(num)) +} + +// Uint32ToBytes converts an uint32 to a byte slice of size 4 +func (vb *packer) WriteUint32(num uint32) (int, error) { + binary.BigEndian.PutUint32(vb.tempBuffer[:4], num) + vb.Write(vb.tempBuffer[:4]) + return 4, nil +} + +// Int16ToBytes converts an int16 to slice of bytes +func (vb *packer) WriteInt16(num int16) (int, error) { + return vb.WriteUint16(uint16(num)) +} + +// UInt16ToBytes converts an iuint16 to slice of bytes +func (vb *packer) WriteUint16(num uint16) (int, error) { + binary.BigEndian.PutUint16(vb.tempBuffer[:2], num) + vb.Write(vb.tempBuffer[:2]) + return 2, nil +} + +func (vb *packer) WriteFloat32(float float32) (int, error) { + bits := math.Float32bits(float) + binary.BigEndian.PutUint32(vb.tempBuffer[:4], bits) + vb.Write(vb.tempBuffer[:4]) + return 4, nil +} + +func (vb *packer) WriteFloat64(float float64) (int, error) { + bits := math.Float64bits(float) + binary.BigEndian.PutUint64(vb.tempBuffer[:8], bits) + vb.Write(vb.tempBuffer[:8]) + return 8, nil +} + +func (vb *packer) WriteByte(b byte) error { + _, err := vb.Write([]byte{b}) + return err +} + +func (vb 
*packer) WriteString(s string) (int, error) { + // To avoid allocating memory, write the strings in small chunks + l := len(s) + const size = 128 + b := [size]byte{} + cnt := 0 + for i := 0; i < l; i++ { + b[cnt] = s[i] + cnt++ + + if cnt == size { + vb.Write(b[:]) + cnt = 0 + } + } + + if cnt > 0 { + vb.Write(b[:cnt]) + } + + return len(s), nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/packer_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/packer_reflect.go new file mode 100644 index 00000000000..0eb7bee39d4 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/packer_reflect.go @@ -0,0 +1,75 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + "reflect" +) + +func init() { + __packObjectReflect = __concretePackObjectReflect +} + +func __concretePackObjectReflect(cmd BufferEx, obj interface{}, mapKey bool) (int, error) { + // check for array and map + rv := reflect.ValueOf(obj) + switch reflect.TypeOf(obj).Kind() { + case reflect.Array, reflect.Slice: + if mapKey && reflect.TypeOf(obj).Kind() == reflect.Slice { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. 
Value: %#v", obj)) + } + // pack bounded array of bytes differently + if reflect.TypeOf(obj).Kind() == reflect.Array && reflect.TypeOf(obj).Elem().Kind() == reflect.Uint8 { + l := rv.Len() + arr := make([]byte, l) + for i := 0; i < l; i++ { + arr[i] = rv.Index(i).Interface().(uint8) + } + return __PackBytes(cmd, arr) + } + + l := rv.Len() + arr := make([]interface{}, l) + for i := 0; i < l; i++ { + arr[i] = rv.Index(i).Interface() + } + return __PackIfcList(cmd, arr) + case reflect.Map: + if mapKey { + panic(fmt.Sprintf("Maps, Slices, and bounded arrays other than Bounded Byte Arrays are not supported as Map keys. Value: %#v", obj)) + } + l := rv.Len() + amap := make(map[interface{}]interface{}, l) + for _, i := range rv.MapKeys() { + amap[i.Interface()] = rv.MapIndex(i).Interface() + } + return __PackIfcMap(cmd, amap) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return __PackObject(cmd, rv.Int(), false) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return __PackObject(cmd, rv.Uint(), false) + case reflect.Bool: + return __PackObject(cmd, rv.Bool(), false) + case reflect.String: + return __PackObject(cmd, rv.String(), false) + case reflect.Float32, reflect.Float64: + return __PackObject(cmd, rv.Float(), false) + } + + panic(fmt.Sprintf("Type `%#v` not supported to pack.", obj)) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/partition.go b/vendor/github.com/aerospike/aerospike-client-go/partition.go new file mode 100644 index 00000000000..03608e361b6 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/partition.go @@ -0,0 +1,65 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// Partition encapsulates partition information. +type Partition struct { + Namespace string + PartitionId int +} + +// NewPartitionByKey initializes a partition and determines the Partition Id +// from key digest automatically. +func NewPartitionByKey(key *Key) *Partition { + partition := newPartitionByKey(key) + return &partition +} + +// newPartitionByKey initializes a partition and determines the Partition Id +// from key digest automatically. It return the struct itself, and not the address +func newPartitionByKey(key *Key) Partition { + return Partition{ + Namespace: key.namespace, + + // CAN'T USE MOD directly - mod will give negative numbers. + // First AND makes positive and negative correctly, then mod. + // For any x, y : x % 2^y = x & (2^y - 1); the second method is twice as fast + PartitionId: int(Buffer.LittleBytesToInt32(key.digest[:], 0)&0xFFFF) & (_PARTITIONS - 1), + } +} + +// NewPartition generates a partition instance. +func NewPartition(namespace string, partitionId int) *Partition { + return &Partition{ + Namespace: namespace, + PartitionId: partitionId, + } +} + +// String implements the Stringer interface. +func (ptn *Partition) String() string { + return fmt.Sprintf("%s:%d", ptn.Namespace, ptn.PartitionId) +} + +// Equals checks equality of two partitions. 
+func (ptn *Partition) Equals(other *Partition) bool { + return ptn.PartitionId == other.PartitionId && ptn.Namespace == other.Namespace +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/partition_parser.go b/vendor/github.com/aerospike/aerospike-client-go/partition_parser.go new file mode 100644 index 00000000000..76ae31c115e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/partition_parser.go @@ -0,0 +1,339 @@ +/* + * Copyright 2013-2017 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package aerospike + +import ( + "encoding/base64" + "fmt" + "strconv" + + . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" +) + +const ( + _PartitionGeneration = "partition-generation" + _ReplicasMaster = "replicas-master" + _ReplicasAll = "replicas-all" +) + +// Parse node's master (and optionally prole) partitions. 
+type partitionParser struct { + pmap partitionMap + buffer []byte + partitionCount int + generation int + length int + offset int +} + +func newPartitionParser(node *Node, partitionCount int, requestProleReplicas bool) (*partitionParser, error) { + newPartitionParser := &partitionParser{ + partitionCount: partitionCount, + } + + // Send format 1: partition-generation\nreplicas-master\n + // Send format 2: partition-generation\nreplicas-all\n + command := _ReplicasMaster + if requestProleReplicas { + command = _ReplicasAll + } + info, err := node.requestRawInfo(_PartitionGeneration, command) + if err != nil { + return nil, err + } + + newPartitionParser.buffer = info.msg.Data + newPartitionParser.length = len(info.msg.Data) + if newPartitionParser.length == 0 { + return nil, NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Partition info is empty")) + } + + newPartitionParser.generation, err = newPartitionParser.parseGeneration() + if err != nil { + return nil, err + } + + newPartitionParser.pmap = make(partitionMap) + + if requestProleReplicas { + err = newPartitionParser.parseReplicasAll(node) + } else { + err = newPartitionParser.parseReplicasMaster(node) + } + + if err != nil { + return nil, err + } + + return newPartitionParser, nil +} + +func (pp *partitionParser) getGeneration() int { + return pp.generation +} + +func (pp *partitionParser) getPartitionMap() partitionMap { + return pp.pmap +} + +func (pp *partitionParser) parseGeneration() (int, error) { + if err := pp.expectName(_PartitionGeneration); err != nil { + return -1, err + } + + begin := pp.offset + for pp.offset < pp.length { + if pp.buffer[pp.offset] == '\n' { + s := string(pp.buffer[begin:pp.offset]) + pp.offset++ + return strconv.Atoi(s) + } + pp.offset++ + } + return -1, NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Failed to find partition-generation value")) +} + +func (pp *partitionParser) parseReplicasMaster(node *Node) error { + // Use low-level info methods and parse byte array directly for 
maximum performance. + // Receive format: replicas-master\t:;:...\n + if err := pp.expectName(_ReplicasMaster); err != nil { + return err + } + + begin := pp.offset + + for pp.offset < pp.length { + if pp.buffer[pp.offset] == ':' { + // Parse namespace. + namespace := string(pp.buffer[begin:pp.offset]) + + if len(namespace) <= 0 || len(namespace) >= 32 { + response := pp.getTruncatedResponse() + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid partition namespace `%s` response: `%s`", namespace, response)) + } + pp.offset++ + begin = pp.offset + + // Parse partition bitmap. + for pp.offset < pp.length { + b := pp.buffer[pp.offset] + + if b == ';' || b == '\n' { + break + } + pp.offset++ + } + + if pp.offset == begin { + response := pp.getTruncatedResponse() + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Empty partition id for namespace `%s` response: `%s`", namespace, response)) + } + + replicaArray := pp.pmap[namespace] + + if replicaArray == nil { + replicaArray = make([][]*Node, 1) + replicaArray[0] = make([]*Node, pp.partitionCount) + pp.pmap[namespace] = replicaArray + } + + if err := pp.decodeBitmap(node, replicaArray[0], begin); err != nil { + return err + } + pp.offset++ + begin = pp.offset + } else { + pp.offset++ + } + } + + return nil +} + +func (pp *partitionParser) parseReplicasAll(node *Node) error { + // Use low-level info methods and parse byte array directly for maximum performance. + // Receive format: replicas-all\t + // :,,...; + // :,,...;\n + if err := pp.expectName(_ReplicasAll); err != nil { + return err + } + + begin := pp.offset + + for pp.offset < pp.length { + if pp.buffer[pp.offset] == ':' { + // Parse namespace. 
+ namespace := string(pp.buffer[begin:pp.offset]) + + if len(namespace) <= 0 || len(namespace) >= 32 { + response := pp.getTruncatedResponse() + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Invalid partition namespace `%s` response: `%s`", namespace, response)) + } + pp.offset++ + begin = pp.offset + + // Parse replica count. + for pp.offset < pp.length { + b := pp.buffer[pp.offset] + + if b == ',' { + break + } + pp.offset++ + } + + replicaCount, err := strconv.Atoi(string(pp.buffer[begin:pp.offset])) + if err != nil { + return err + } + + // Ensure replicaArray is correct size. + replicaArray := pp.pmap[namespace] + + if replicaArray == nil { + // Create new replica array. + replicaArray = make([][]*Node, replicaCount) + + for i := 0; i < replicaCount; i++ { + replicaArray[i] = make([]*Node, pp.partitionCount) + } + + pp.pmap[namespace] = replicaArray + } else if len(replicaArray) != replicaCount { + Logger.Info("Namespace `%s` replication factor changed from `%d` to `%d` ", namespace, len(replicaArray), replicaCount) + + // Resize replica array. + replicaTarget := make([][]*Node, replicaCount) + + if len(replicaArray) < replicaCount { + i := 0 + + // Copy existing entries. + for ; i < len(replicaArray); i++ { + replicaTarget[i] = replicaArray[i] + } + + // Create new entries. + for ; i < replicaCount; i++ { + replicaTarget[i] = make([]*Node, pp.partitionCount) + } + } else { + // Copy existing entries. + for i := 0; i < replicaCount; i++ { + replicaTarget[i] = replicaArray[i] + } + } + + replicaArray = replicaTarget + pp.pmap[namespace] = replicaArray + } + + // Parse partition bitmaps. 
+ for i := 0; i < replicaCount; i++ { + pp.offset++ + begin = pp.offset + + // Find bitmap endpoint + for pp.offset < pp.length { + b := pp.buffer[pp.offset] + + if b == ',' || b == ';' { + break + } + pp.offset++ + } + + if pp.offset == begin { + response := pp.getTruncatedResponse() + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Empty partition id for namespace `%s` response: `%s`", namespace, response)) + } + + if err := pp.decodeBitmap(node, replicaArray[i], begin); err != nil { + return err + } + } + pp.offset++ + begin = pp.offset + } else { + pp.offset++ + } + } + + return nil +} + +func (pp *partitionParser) decodeBitmap(node *Node, nodeArray []*Node, begin int) error { + restoreBuffer, err := base64.StdEncoding.DecodeString(string(pp.buffer[begin:pp.offset])) + if err != nil { + return err + } + + for i := 0; i < pp.partitionCount; i++ { + nodeOld := nodeArray[i] + + if (restoreBuffer[i>>3] & (0x80 >> uint(i&7))) != 0 { + // Node owns this partition. + if nodeOld != nil && nodeOld != node { + // Force previously mapped node to refresh it's partition map on next cluster tend. + nodeOld.partitionGeneration.Set(-1) + } + + // Use lazy set because there is only one producer thread. In addition, + // there is a one second delay due to the cluster tend polling interval. + // An extra millisecond for a node change will not make a difference and + // overall performance is improved. + nodeArray[i] = node + } else { + // Node does not own partition. + if node == nodeOld { + // Must erase previous map. 
+ nodeArray[i] = nil + } + } + } + + return nil +} + +func (pp *partitionParser) expectName(name string) error { + begin := pp.offset + + for pp.offset < pp.length { + if pp.buffer[pp.offset] == '\t' { + s := string(pp.buffer[begin:pp.offset]) + if name == s { + pp.offset++ + return nil + } + break + } + pp.offset++ + } + + return NewAerospikeError(PARSE_ERROR, fmt.Sprintf("Failed to find `%s`", name)) +} + +func (pp *partitionParser) getTruncatedResponse() string { + max := pp.length + if max > 200 { + max = 200 + } + return string(pp.buffer[0:max]) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/peers.go b/vendor/github.com/aerospike/aerospike-client-go/peers.go new file mode 100644 index 00000000000..580839cad7c --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/peers.go @@ -0,0 +1,101 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "sync" + + // . "github.com/aerospike/aerospike-client-go/logger" + . 
"github.com/aerospike/aerospike-client-go/types/atomic" +) + +type peers struct { + _peers map[string]*peer + _hosts map[Host]struct{} + _nodes map[string]*Node + refreshCount AtomicInt + usePeers AtomicBool + genChanged AtomicBool + + mutex sync.RWMutex +} + +func newPeers(peerCapacity int, addCapacity int) *peers { + return &peers{ + _peers: make(map[string]*peer, peerCapacity), + _hosts: make(map[Host]struct{}, addCapacity), + _nodes: make(map[string]*Node, addCapacity), + usePeers: *NewAtomicBool(true), + genChanged: *NewAtomicBool(true), + } +} + +func (ps *peers) hostExists(host Host) bool { + ps.mutex.RLock() + defer ps.mutex.RUnlock() + _, exists := ps._hosts[host] + return exists +} + +func (ps *peers) addHost(host Host) { + ps.mutex.Lock() + defer ps.mutex.Unlock() + ps._hosts[host] = struct{}{} +} + +func (ps *peers) addNode(name string, node *Node) { + ps.mutex.Lock() + defer ps.mutex.Unlock() + ps._nodes[name] = node +} + +func (ps *peers) nodeByName(name string) *Node { + ps.mutex.RLock() + defer ps.mutex.RUnlock() + return ps._nodes[name] +} + +func (ps *peers) appendPeers(peers []*peer) { + ps.mutex.Lock() + defer ps.mutex.Unlock() + + for _, peer := range peers { + ps._peers[peer.nodeName] = peer + } + +} + +func (ps *peers) peers() []*peer { + ps.mutex.RLock() + defer ps.mutex.RUnlock() + + res := make([]*peer, 0, len(ps._peers)) + for _, peer := range ps._peers { + res = append(res, peer) + } + return res +} + +func (ps *peers) nodes() map[string]*Node { + ps.mutex.RLock() + defer ps.mutex.RUnlock() + return ps._nodes +} + +type peer struct { + nodeName string + tlsName string + hosts []*Host +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/peers_parser.go b/vendor/github.com/aerospike/aerospike-client-go/peers_parser.go new file mode 100644 index 00000000000..741c1d5a2ef --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/peers_parser.go @@ -0,0 +1,335 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + // . "github.com/aerospike/aerospike-client-go/logger" + + "io" + "strconv" + + . "github.com/aerospike/aerospike-client-go/types" +) + +var aeroerr error = NewAerospikeError(PARSE_ERROR, "Error parsing peers list.") + +func parsePeers(cluster *Cluster, node *Node) (*peerListParser, error) { + var cmd string + if cluster.clientPolicy.TlsConfig != nil { + if cluster.clientPolicy.UseServicesAlternate { + cmd = "peers-tls-alt" + } else { + cmd = "peers-tls-std" + } + } else { + if cluster.clientPolicy.UseServicesAlternate { + cmd = "peers-clear-alt" + } else { + cmd = "peers-clear-std" + } + } + + info, err := node.RequestInfo(cmd) + if err != nil { + return nil, err + } + + peersStr, exists := info[cmd] + if !exists { + return nil, NewAerospikeError(PARSE_ERROR, "Info Command response was empty.") + } + + p := peerListParser{buf: []byte(peersStr)} + if err := p.Parse(); err != nil { + return nil, err + } + + return &p, nil +} + +type peerListParser struct { + buf []byte + pos int + + defPort *int64 + gen *int64 + peers []*peer +} + +func (p *peerListParser) generation() int64 { + if p.gen != nil { + return *p.gen + } + return 0 +} + +func (p *peerListParser) Expect(ch byte) bool { + if p.pos == len(p.buf) { + return false + } + + if p.buf[p.pos] == ch { + p.pos++ + return true + } + return false +} + +func (p *peerListParser) readByte() *byte { + if p.pos == len(p.buf) { + 
return nil + } + + ch := p.buf[p.pos] + p.pos++ + return &ch +} + +func (p *peerListParser) PeekByte() *byte { + if p.pos == len(p.buf) { + return nil + } + + ch := p.buf[p.pos] + return &ch +} + +func (p *peerListParser) readInt64() (*int64, error) { + if p.pos == len(p.buf) { + return nil, io.EOF + } + + if p.buf[p.pos] == ',' { + return nil, nil + } + + begin := p.pos + for p.pos < len(p.buf) { + ch := p.buf[p.pos] + if ch == ',' { + break + } + p.pos++ + } + + num, err := strconv.ParseInt(string(p.buf[begin:p.pos]), 10, 64) + if err != nil { + return nil, err + } + return &num, nil +} + +func (p *peerListParser) readString() (string, error) { + if p.pos == len(p.buf) { + return "", io.EOF + } + + if p.buf[p.pos] == ',' { + return "", nil + } + + begin := p.pos + bracket := p.buf[p.pos] == '[' + for p.pos < len(p.buf) { + ch := p.buf[p.pos] + if ch == ',' { + break + } + + if ch == ']' { + if !bracket { + break + } + bracket = false + } + p.pos++ + } + + return string(p.buf[begin:p.pos]), nil +} + +func (p *peerListParser) ParseHost(host string) (*Host, error) { + ppos := -1 + bpos := -1 + for i := 0; i < len(host); i++ { + switch host[i] { + case ':': + ppos = i + case ']': + ppos = -1 + bpos = i + } + } + + port := 0 + if p.defPort != nil { + port = int(*p.defPort) + } + var err error + if ppos >= 0 { + portStr := host[ppos+1:] + port, err = strconv.Atoi(portStr) + if err != nil { + return nil, err + } + } + + var addr string + if bpos >= 0 { + addr = host[1:bpos] + } else { + if ppos >= 0 { + addr = host[:ppos] + } else { + addr = host + } + } + + return NewHost(addr, port), nil +} + +func (p *peerListParser) readHosts(tlsName string) ([]*Host, error) { + if !p.Expect('[') { + return nil, aeroerr + } + + hostList := []*Host{} + for { + hostStr, err := p.readString() + if err != nil { + return nil, err + } + + if hostStr == "" { + break + } + + host, err := p.ParseHost(hostStr) + if err != nil { + return nil, aeroerr + } + + host.TLSName = tlsName + hostList = 
append(hostList, host) + + if !p.Expect(',') { + break + } + } + + if !p.Expect(']') { + return nil, aeroerr + } + + return hostList, nil +} + +func (p *peerListParser) readPeer() (*peer, error) { + if !p.Expect('[') { + return nil, nil + } + + nodeName, err := p.readString() + if err != nil { + return nil, err + } + + if !p.Expect(',') { + return nil, aeroerr + } + tlsName, err := p.readString() + if err != nil { + return nil, err + } + + if !p.Expect(',') { + return nil, aeroerr + } + + hostList, err := p.readHosts(tlsName) + if err != nil { + return nil, err + } + + if !p.Expect(']') { + return nil, aeroerr + } + + nodeData := &peer{nodeName: nodeName, tlsName: tlsName, hosts: hostList} + return nodeData, nil +} + +func (p *peerListParser) readNodeList() ([]*peer, error) { + ch := p.readByte() + if ch == nil { + return nil, nil + } + + if *ch != '[' { + return nil, aeroerr + } + + nodeList := []*peer{} + for { + node, err := p.readPeer() + if err != nil { + return nil, err + } + + if node == nil { + break + } + + nodeList = append(nodeList, node) + + if !p.Expect(',') { + break + } + } + + if !p.Expect(']') { + return nil, aeroerr + } + + return nodeList, nil +} + +func (p *peerListParser) Parse() error { + var err error + p.gen, err = p.readInt64() + if err != nil { + return err + } + + if !p.Expect(',') { + return aeroerr + } + + p.defPort, err = p.readInt64() + if err != nil { + return err + } + + if !p.Expect(',') { + return aeroerr + } + + p.peers, err = p.readNodeList() + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/LICENSE b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/LICENSE new file mode 100644 index 00000000000..555bb71c2d6 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2011 James Keane . All rights reserved. +Copyright (c) 2006 Damien Miller . 
+Copyright (c) 2011 ZooWar.com, All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of weekendlogic nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/README b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/README new file mode 100644 index 00000000000..a4d638abe9f --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/README @@ -0,0 +1,46 @@ +Installation: + goinstall github.com/jameskeane/bcrypt + +Example use: + package main + + import ( + "fmt" + "github.com/jameskeane/bcrypt" + ) + + var password = "WyWihatdyd?frub1" + var bad_password = "just a wild guess" + + func main() { + // generate a random salt with default rounds of complexity + salt, _ := bcrypt.Salt() + + // generate a random salt with 10 rounds of complexity + salt, _ = bcrypt.Salt(10) + + // hash and verify a password with random salt + hash, _ := bcrypt.Hash(password) + if bcrypt.Match(password, hash) { + fmt.Println("They match") + } + + // hash and verify a password with a static salt + hash, _ = bcrypt.Hash(password, salt) + if bcrypt.Match(password, hash) { + fmt.Println("They match") + } + + // verify a random password fails to match the hashed password + if !bcrypt.Match(bad_password, hash) { + fmt.Println("They don't match") + } + } + +Todo: + grep 'TODO' * -r + +Notes: + * This library is derived from jBcrypt by Damien Miller + * bcrypt_test.go is from ZooWar.com + diff --git a/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/bcrypt.go b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/bcrypt.go new file mode 100644 index 00000000000..fe84921ac9f --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/bcrypt.go @@ -0,0 +1,190 @@ +package bcrypt + +import ( + "bytes" + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "errors" + "strconv" + "strings" +) + +var ( + InvalidRounds = errors.New("bcrypt: Invalid rounds parameter") + InvalidSalt = errors.New("bcrypt: Invalid salt supplied") +) + +const ( + MaxRounds = 31 + MinRounds = 4 + DefaultRounds = 12 + SaltLen = 16 + BlowfishRounds = 
16 +) + +var enc = base64.NewEncoding("./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") + +// Helper function to build the bcrypt hash string +// payload takes : +// * []byte -> which it base64 encodes it (trims padding "=") and writes it to the buffer +// * string -> which it writes straight to the buffer +func build_bcrypt_str(minor byte, rounds uint, payload ...interface{}) []byte { + rs := bytes.NewBuffer(make([]byte, 0, 61)) + rs.WriteString("$2") + if minor >= 'a' { + rs.WriteByte(minor) + } + + rs.WriteByte('$') + if rounds < 10 { + rs.WriteByte('0') + } + + rs.WriteString(strconv.FormatUint(uint64(rounds), 10)) + rs.WriteByte('$') + for _, p := range payload { + if pb, ok := p.([]byte); ok { + rs.WriteString(strings.TrimRight(enc.EncodeToString(pb), "=")) + } else if ps, ok := p.(string); ok { + rs.WriteString(ps) + } + } + return rs.Bytes() +} + +// Salt generation +func Salt(rounds ...int) (string, error) { + rb, err := SaltBytes(rounds...) + return string(rb), err +} + +func SaltBytes(rounds ...int) (salt []byte, err error) { + r := DefaultRounds + if len(rounds) > 0 { + r = rounds[0] + if r < MinRounds || r > MaxRounds { + return nil, InvalidRounds + } + } + + rnd := make([]byte, SaltLen) + read, err := rand.Read(rnd) + if read != SaltLen || err != nil { + return nil, err + } + + return build_bcrypt_str('a', uint(r), rnd), nil +} + +func consume(r *bytes.Buffer, b byte) bool { + got, err := r.ReadByte() + if err != nil { + return false + } + if got != b { + r.UnreadByte() + return false + } + + return true +} + +func Hash(password string, salt ...string) (ps string, err error) { + var s []byte + var pb []byte + + if len(salt) == 0 { + s, err = SaltBytes() + if err != nil { + return + } + } else if len(salt) > 0 { + s = []byte(salt[0]) + } + + pb, err = HashBytes([]byte(password), s) + return string(pb), err +} + +func HashBytes(password []byte, salt ...[]byte) (hash []byte, err error) { + var s []byte + + if len(salt) == 0 { + s, err = 
SaltBytes() + if err != nil { + return + } + } else if len(salt) > 0 { + s = salt[0] + } + + // TODO: use a regex? I hear go has bad regex performance a simple FSM seems faster + // "^\\$2([a-z]?)\\$([0-3][0-9])\\$([\\./A-Za-z0-9]{22}+)" + + // Ok, extract the required information + minor := byte(0) + sr := bytes.NewBuffer(s) + + if !consume(sr, '$') || !consume(sr, '2') { + return nil, InvalidSalt + } + + if !consume(sr, '$') { + minor, _ = sr.ReadByte() + if minor != 'a' || !consume(sr, '$') { + return nil, InvalidSalt + } + } + + rounds_bytes := make([]byte, 2) + read, err := sr.Read(rounds_bytes) + if err != nil || read != 2 { + return nil, InvalidSalt + } + + if !consume(sr, '$') { + return nil, InvalidSalt + } + + var rounds64 uint64 + rounds64, err = strconv.ParseUint(string(rounds_bytes), 10, 0) + if err != nil { + return nil, InvalidSalt + } + + rounds := uint(rounds64) + + // TODO: can't we use base64.NewDecoder(enc, sr) ? + salt_bytes := make([]byte, 22) + read, err = sr.Read(salt_bytes) + if err != nil || read != 22 { + return nil, InvalidSalt + } + + var saltb []byte + // encoding/base64 expects 4 byte blocks padded, since bcrypt uses only 22 bytes we need to go up + saltb, err = enc.DecodeString(string(salt_bytes) + "==") + if err != nil { + return nil, err + } + + // cipher expects null terminated input (go initializes everything with zero values so this works) + password_term := make([]byte, len(password)+1) + copy(password_term, password) + + hashed := crypt_raw(password_term, saltb[:SaltLen], rounds) + return build_bcrypt_str(minor, rounds, string(salt_bytes), hashed[:len(bf_crypt_ciphertext)*4-1]), nil +} + +func Match(password, hash string) bool { + return MatchBytes([]byte(password), []byte(hash)) +} + +func MatchBytes(password []byte, hash []byte) bool { + h, err := HashBytes(password, hash) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(h, hash) == 1 +} diff --git 
a/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/cipher.go b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/cipher.go new file mode 100644 index 00000000000..aff6d9dfe6e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/pkg/bcrypt/cipher.go @@ -0,0 +1,415 @@ +package bcrypt + +var p_orig = [18]uint{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, + 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, + 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, + 0x9216d5d9, 0x8979fb1b, +} + +var s_orig = [1024]uint{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, + 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, + 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, + 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, + 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, + 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, + 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, + 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, + 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, + 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, + 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, + 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, + 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, + 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, + 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, + 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, + 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, + 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, + 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 
+ 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, + 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, + 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, + 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, + 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, + 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, + 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, + 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, + 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, + 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, + 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, + 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, + 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, + 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, + 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, + 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, + 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, + 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, + 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, + 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, + 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, + 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, + 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, + 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, + 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, + 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 
+ 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, + 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, + 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, + 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, + 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, + 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, + 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, + 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, + 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, + 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, + 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, + 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, + 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, + 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, + 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, + 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, + 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, + 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, + 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, + 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, + 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, + 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, + 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, + 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, + 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, + 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, + 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, 
+ 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, + 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, + 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, + 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, + 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, + 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, + 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, + 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, + 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, + 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, + 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, + 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, + 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, + 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, + 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, + 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, + 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, + 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, + 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, + 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, + 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, + 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, + 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, + 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, + 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, + 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 
+ 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, + 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, + 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, + 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, + 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, + 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, + 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, + 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, + 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, + 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, + 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, + 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, + 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, + 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, + 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, + 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, + 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, + 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, + 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, + 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, + 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, + 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, + 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, + 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, + 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, + 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, + 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, 
+ 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, + 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, + 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, + 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, + 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, + 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, + 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, + 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, + 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, + 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, + 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, + 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, + 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, + 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, + 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, + 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, + 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, + 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, + 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, + 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, + 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, + 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, + 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, + 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, + 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, + 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, + 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, 
+ 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, + 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, + 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, + 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, + 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, + 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, + 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, + 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, + 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, + 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, + 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, + 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, + 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, + 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, + 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, + 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, + 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, + 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, + 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var bf_crypt_ciphertext = [6]uint{ + 0x4f727068, 0x65616e42, 0x65686f6c, + 0x64657253, 0x63727944, 0x6f756274, +} + +type cipher struct { + P [18]uint + S [1024]uint + data [6]uint +} + +func (c *cipher) encipher(lr []uint, off int) { + l := lr[off] ^ c.P[0] + r := lr[off+1] + + for i := 0; i <= BlowfishRounds-2; i += 2 { + // Feistel substitution on left and right word respectively + r ^= (((c.S[(l>>24)&0xff] + c.S[0x100|((l>>16)&0xff)]) ^ c.S[0x200|((l>>8)&0xff)]) + c.S[0x300|(l&0xff)]) ^ c.P[i+1] + l ^= (((c.S[(r>>24)&0xff] + c.S[0x100|((r>>16)&0xff)]) ^ c.S[0x200|((r>>8)&0xff)]) + 
			c.S[0x300|(r&0xff)]) ^ c.P[i+2]
	}

	// Output whitening with the final two subkeys; note the left/right
	// words are swapped here, undoing the swap implied by the loop above.
	lr[off] = r ^ c.P[BlowfishRounds+1]
	lr[off+1] = l
}

// streamtoword cyclically extracts one 32-bit word of key material.
//
//	data: the byte string to extract from (must be non-empty)
//	off:  current offset into data
//
// It returns the extracted word and the advanced offset, which wraps
// around to the start of data (the "cyclic" part of the bcrypt key
// schedule).
func streamtoword(data []byte, off int) (uint, int) {
	var word uint
	for i := 0; i < 4; i++ {
		word = (word << 8) | uint(data[off]&0xff)
		off = (off + 1) % len(data)
	}

	return word, off
}

// key runs the standard Blowfish key schedule: XOR the P-array with the
// key material, then repeatedly encrypt the evolving zero block to
// derive the final P and S boxes.
func (c *cipher) key(key []byte) {
	var word uint
	off := 0
	lr := []uint{0, 0}
	plen := len(c.P)
	slen := len(c.S)

	// Fold the key into the subkeys.
	for i := 0; i < plen; i++ {
		word, off = streamtoword(key, off)
		c.P[i] = c.P[i] ^ word
	}

	// Replace P with successive encryptions of the running block.
	for i := 0; i < plen; i += 2 {
		c.encipher(lr, 0)
		c.P[i] = lr[0]
		c.P[i+1] = lr[1]
	}

	// Same for all four S-boxes (flattened into one 1024-entry array).
	for i := 0; i < slen; i += 2 {
		c.encipher(lr, 0)
		c.S[i] = lr[0]
		c.S[i+1] = lr[1]
	}
}

// ekskey performs the "enhanced key schedule" step described by Provos
// and Mazieres in "A Future-Adaptable Password Scheme"
// (http://www.openbsd.org/papers/bcrypt-paper.ps): like key, but the
// running block is additionally XORed with words drawn cyclically from
// the salt (data) before each encryption.
//
//	data: salt information
//	key:  password information
func (c *cipher) ekskey(data []byte, key []byte) {
	var word uint
	koff := 0
	doff := 0
	lr := []uint{0, 0}
	plen := len(c.P)
	slen := len(c.S)

	for i := 0; i < plen; i++ {
		word, koff = streamtoword(key, koff)
		c.P[i] = c.P[i] ^ word
	}

	for i := 0; i < plen; i += 2 {
		word, doff = streamtoword(data, doff)
		lr[0] ^= word
		word, doff = streamtoword(data, doff)
		lr[1] ^= word
		c.encipher(lr, 0)
		c.P[i] = lr[0]
		c.P[i+1] = lr[1]
	}

	for i := 0; i < slen; i += 2 {
		word, doff = streamtoword(data, doff)
		lr[0] ^= word
		word, doff = streamtoword(data, doff)
		lr[1] ^= word
		c.encipher(lr, 0)
		c.S[i] = lr[0]
		c.S[i+1] = lr[1]
	}
}

// crypt_raw performs the central password-hashing step of the bcrypt
// scheme and returns the 24-byte binary hash.
//
//	password:   the (NUL-terminated) password to hash
//	salt:       the binary salt to hash with the password
//	log_rounds: binary logarithm of the number of hashing rounds
//
// NOTE(review): rounds is computed as 1 << log_rounds with no upper
// bound here; callers appear to validate the cost elsewhere (MaxRounds)
// — confirm before relying on this with untrusted input.
func crypt_raw(password []byte, salt []byte, log_rounds uint) []byte {
	c := &cipher{P: p_orig, S: s_orig, data: bf_crypt_ciphertext}

	rounds := 1 << log_rounds
	c.ekskey(salt, password)
	// Alternately re-key with the password and the salt 2^log_rounds times.
	for i := 0; i < rounds; i++ {
		c.key(password)
		c.key(salt)
	}

	// Encrypt the magic "OrpheanBeholderScryDoubt" blocks 64 times.
	for i := 0; i < 64; i++ {
		for j := 0; j < (6 >> 1); j++ {
			c.encipher(c.data[:], j<<1)
		}
	}

	// Serialize the six 32-bit words big-endian into 24 bytes.
	ret := make([]byte, 24)
	for i := 0; i < 6; i++ {
		k := i << 2
		ret[k] = (byte)((c.data[i] >> 24) & 0xff)
		ret[k+1] = (byte)((c.data[i] >> 16) & 0xff)
		ret[k+2] = (byte)((c.data[i] >> 8) & 0xff)
		ret[k+3] = (byte)(c.data[i] & 0xff)
	}
	return ret
}

// ---- vendor file: pkg/ripemd160/ripemd160.go ----

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ripemd160 implements the RIPEMD-160 hash algorithm.
package ripemd160

import "hash"

// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.

// Registration with the standard crypto package is intentionally
// disabled in this vendored copy:
// func init() {
// 	crypto.RegisterHash(crypto.RIPEMD160, New)
// }

// The size of the checksum in bytes.
const Size = 20

// The block size of the hash algorithm in bytes.
+const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// Digest represents the partial evaluation of a checksum. +type Digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *Digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. +func New() hash.Hash { + result := Digest{} + result.Reset() + return &result +} + +func (d *Digest) Size() int { return Size } + +func (d *Digest) BlockSize() int { return BlockSize } + +func (d *Digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d *Digest) Sum(res []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + // d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. + tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var Digest [Size]byte + for i, s := range d.s { + Digest[i*4] = byte(s) + Digest[i*4+1] = byte(s >> 8) + Digest[i*4+2] = byte(s >> 16) + Digest[i*4+3] = byte(s >> 24) + } + + if res != nil { + copy(res, Digest[:]) + return nil + } + return append(res, Digest[:]...) 
+} diff --git a/vendor/github.com/aerospike/aerospike-client-go/pkg/ripemd160/ripemd160block.go b/vendor/github.com/aerospike/aerospike-client-go/pkg/ripemd160/ripemd160block.go new file mode 100644 index 00000000000..bb7fe7d2b6d --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/pkg/ripemd160/ripemd160block.go @@ -0,0 +1,161 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package ripemd160 + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md 
*Digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := _r[i] + alpha = (alpha<>(32-s)) + e + beta = c<<10 | c>>22 + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = r_[i] + alpha = (alpha<>(32-s)) + ee + beta = cc<<10 | cc>>22 + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := _r[i] + alpha = (alpha<>(32-s)) + e + beta = c<<10 | c>>22 + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = r_[i] + alpha = (alpha<>(32-s)) + ee + beta = cc<<10 | cc>>22 + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := _r[i] + alpha = (alpha<>(32-s)) + e + beta = c<<10 | c>>22 + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = r_[i] + alpha = (alpha<>(32-s)) + ee + beta = cc<<10 | cc>>22 + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i < 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := _r[i] + alpha = (alpha<>(32-s)) + e + beta = c<<10 | c>>22 + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = r_[i] + alpha = (alpha<>(32-s)) + ee + beta = cc<<10 | cc>>22 + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := _r[i] + alpha 
= (alpha<>(32-s)) + e + beta = c<<10 | c>>22 + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = r_[i] + alpha = (alpha<>(32-s)) + ee + beta = cc<<10 | cc>>22 + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/policy.go b/vendor/github.com/aerospike/aerospike-client-go/policy.go new file mode 100644 index 00000000000..f7abd6717de --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/policy.go @@ -0,0 +1,84 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "time" +) + +// Policy Interface +type Policy interface { + // Retrieves BasePolicy + GetBasePolicy() *BasePolicy +} + +// BasePolicy encapsulates parameters for transaction policy attributes +// used in all database operation calls. +type BasePolicy struct { + Policy + + // Priority of request relative to other transactions. + // Currently, only used for scans. + Priority Priority //= Priority.DEFAULT; + + // How replicas should be consulted in a read operation to provide the desired + // consistency guarantee. 
	// Default to allowing one replica to be used in the
	// read operation.
	ConsistencyLevel ConsistencyLevel //= CONSISTENCY_ONE

	// Timeout specifies transaction timeout.
	// This timeout is used to set the socket timeout and is also sent to the
	// server along with the transaction in the wire protocol.
	// Default to no timeout (0).
	Timeout time.Duration

	// MaxRetries determines maximum number of retries before aborting the current transaction.
	// A retry is attempted when there is a network error other than timeout.
	// If maxRetries is exceeded, the abort will occur even if the timeout
	// has not yet been exceeded.
	MaxRetries int //= 2;

	// SleepBetweenRetries determines duration to sleep between retries if a transaction fails and the
	// timeout was not exceeded. Enter zero to skip sleep.
	// (Doc comment previously named the non-existent field "SleepBetweenReplies".)
	SleepBetweenRetries time.Duration //= 1ms;

	// SleepMultiplier specifies the multiplying factor to be used for exponential backoff during retries.
	// Default to (1.0); Only values greater than 1 are valid.
	SleepMultiplier float64 //= 1.0;

	// ReplicaPolicy determines the node to send the read commands containing the key's partition replica type.
	// Write commands are not affected by this setting, because all writes are directed
	// to the node containing the key's master partition.
	// Default to sending read commands to the node containing the key's master partition.
	ReplicaPolicy ReplicaPolicy
}

// NewPolicy generates a new BasePolicy instance with default values.
func NewPolicy() *BasePolicy {
	return &BasePolicy{
		Priority:            DEFAULT,
		ConsistencyLevel:    CONSISTENCY_ONE,
		Timeout:             0 * time.Millisecond,
		MaxRetries:          2,
		SleepBetweenRetries: 1 * time.Millisecond,
		SleepMultiplier:     1.0,
		ReplicaPolicy:       MASTER,
	}
}

// Compile-time assertion that BasePolicy satisfies the Policy interface.
var _ Policy = &BasePolicy{}

// GetBasePolicy returns embedded BasePolicy in all types that embed this struct.
func (p *BasePolicy) GetBasePolicy() *BasePolicy { return p }

// ---- vendor file: predexp.go ----

// Copyright 2017 Aerospike, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aerospike

import (
	"fmt"
	"math"
	"strconv"
)

// Wire-protocol tag values identifying each predicate-expression node
// type when marshaled into a command.
const (
	_AS_PREDEXP_UNKNOWN_BIN uint16 = math.MaxUint16

	_AS_PREDEXP_AND uint16 = 1
	_AS_PREDEXP_OR  uint16 = 2
	_AS_PREDEXP_NOT uint16 = 3

	_AS_PREDEXP_INTEGER_VALUE uint16 = 10
	_AS_PREDEXP_STRING_VALUE  uint16 = 11
	_AS_PREDEXP_GEOJSON_VALUE uint16 = 12

	_AS_PREDEXP_INTEGER_BIN uint16 = 100
	_AS_PREDEXP_STRING_BIN  uint16 = 101
	_AS_PREDEXP_GEOJSON_BIN uint16 = 102
	_AS_PREDEXP_LIST_BIN    uint16 = 103
	_AS_PREDEXP_MAP_BIN     uint16 = 104

	_AS_PREDEXP_INTEGER_VAR uint16 = 120
	_AS_PREDEXP_STRING_VAR  uint16 = 121
	_AS_PREDEXP_GEOJSON_VAR uint16 = 122

	_AS_PREDEXP_REC_DEVICE_SIZE   uint16 = 150
	_AS_PREDEXP_REC_LAST_UPDATE   uint16 = 151
	_AS_PREDEXP_REC_VOID_TIME     uint16 = 152
	_AS_PREDEXP_REC_DIGEST_MODULO uint16 = 153

	_AS_PREDEXP_INTEGER_EQUAL     uint16 = 200
	_AS_PREDEXP_INTEGER_UNEQUAL   uint16 = 201
	_AS_PREDEXP_INTEGER_GREATER   uint16 = 202
	_AS_PREDEXP_INTEGER_GREATEREQ uint16 = 203
	_AS_PREDEXP_INTEGER_LESS      uint16 = 204
_AS_PREDEXP_INTEGER_LESSEQ uint16 = 205 + + _AS_PREDEXP_STRING_EQUAL uint16 = 210 + _AS_PREDEXP_STRING_UNEQUAL uint16 = 211 + _AS_PREDEXP_STRING_REGEX uint16 = 212 + + _AS_PREDEXP_GEOJSON_WITHIN uint16 = 220 + _AS_PREDEXP_GEOJSON_CONTAINS uint16 = 221 + + _AS_PREDEXP_LIST_ITERATE_OR uint16 = 250 + _AS_PREDEXP_MAPKEY_ITERATE_OR uint16 = 251 + _AS_PREDEXP_MAPVAL_ITERATE_OR uint16 = 252 + _AS_PREDEXP_LIST_ITERATE_AND uint16 = 253 + _AS_PREDEXP_MAPKEY_ITERATE_AND uint16 = 254 + _AS_PREDEXP_MAPVAL_ITERATE_AND uint16 = 255 +) + +// ---------------- + +type predExp interface { + String() string + marshaledSize() int + marshal(*baseCommand) error +} + +type predExpBase struct { +} + +func (self *predExpBase) marshaledSize() int { + return 2 + 4 // sizeof(TAG) + sizeof(LEN) +} + +func (self *predExpBase) marshalTL(cmd *baseCommand, tag uint16, len uint32) { + cmd.WriteUint16(tag) + cmd.WriteUint32(len) +} + +// ---------------- predExpAnd + +type predExpAnd struct { + predExpBase + nexpr uint16 // number of child expressions +} + +// String implements the Stringer interface +func (e *predExpAnd) String() string { + return "AND" +} + +// NewPredExpAnd creates an AND predicate. Argument describes the number of expressions. +func NewPredExpAnd(nexpr uint16) *predExpAnd { + return &predExpAnd{nexpr: nexpr} +} + +func (self *predExpAnd) marshaledSize() int { + return self.predExpBase.marshaledSize() + 2 +} + +func (self *predExpAnd) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_AND, 2) + cmd.WriteUint16(self.nexpr) + return nil +} + +// ---------------- predExpOr + +type predExpOr struct { + predExpBase + nexpr uint16 // number of child expressions +} + +// String implements the Stringer interface +func (e *predExpOr) String() string { + return "OR" +} + +// NewPredExpOr creates an OR predicate. Argument describes the number of expressions. 
+func NewPredExpOr(nexpr uint16) *predExpOr { + return &predExpOr{nexpr: nexpr} +} + +func (self *predExpOr) marshaledSize() int { + return self.predExpBase.marshaledSize() + 2 +} + +func (self *predExpOr) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_OR, 2) + cmd.WriteUint16(self.nexpr) + return nil +} + +// ---------------- predExpNot + +type predExpNot struct { + predExpBase +} + +// String implements the Stringer interface +func (e *predExpNot) String() string { + return "NOT" +} + +// NewPredExpNot creates a NOT predicate +func NewPredExpNot() *predExpNot { + return &predExpNot{} +} + +func (self *predExpNot) marshaledSize() int { + return self.predExpBase.marshaledSize() +} + +func (self *predExpNot) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_NOT, 0) + return nil +} + +// ---------------- predExpIntegerValue + +type predExpIntegerValue struct { + predExpBase + val int64 +} + +// String implements the Stringer interface +func (e *predExpIntegerValue) String() string { + return strconv.FormatInt(e.val, 10) +} + +// NewPredExpIntegerValue embeds an int64 value in a predicate expression. +func NewPredExpIntegerValue(val int64) *predExpIntegerValue { + return &predExpIntegerValue{val: val} +} + +func (self *predExpIntegerValue) marshaledSize() int { + return self.predExpBase.marshaledSize() + 8 +} + +func (self *predExpIntegerValue) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_INTEGER_VALUE, 8) + cmd.WriteInt64(self.val) + return nil +} + +// ---------------- predExpStringValue + +type predExpStringValue struct { + predExpBase + val string +} + +// String implements the Stringer interface +func (e *predExpStringValue) String() string { + return "'" + e.val + "'" +} + +// NewPredExpStringValue embeds a string value in a predicate expression. 
+func NewPredExpStringValue(val string) *predExpStringValue { + return &predExpStringValue{val: val} +} + +func (self *predExpStringValue) marshaledSize() int { + return self.predExpBase.marshaledSize() + len(self.val) +} + +func (self *predExpStringValue) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_STRING_VALUE, uint32(len(self.val))) + cmd.WriteString(self.val) + return nil +} + +// ---------------- predExpGeoJSONValue + +type predExpGeoJSONValue struct { + predExpBase + val string +} + +// String implements the Stringer interface +func (e *predExpGeoJSONValue) String() string { + return e.val +} + +// NewPredExpGeoJSONValue embeds a GeoJSON value in a predicate expression. +func NewPredExpGeoJSONValue(val string) *predExpGeoJSONValue { + return &predExpGeoJSONValue{val: val} +} + +func (self *predExpGeoJSONValue) marshaledSize() int { + return self.predExpBase.marshaledSize() + + 1 + // flags + 2 + // ncells + len(self.val) // strlen value +} + +func (self *predExpGeoJSONValue) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_GEOJSON_VALUE, uint32(1+2+len(self.val))) + cmd.WriteByte(uint8(0)) + cmd.WriteUint16(0) + cmd.WriteString(self.val) + return nil +} + +// ---------------- predExp???Bin + +type predExpBin struct { + predExpBase + name string + tag uint16 // not marshaled +} + +// String implements the Stringer interface +func (e *predExpBin) String() string { + // FIXME - This is not currently distinguished from a var. + return e.name +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is not known. +func NewPredExpUnknownBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_UNKNOWN_BIN} +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is integer. 
+func NewPredExpIntegerBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_INTEGER_BIN} +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is String. +func NewPredExpStringBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_STRING_BIN} +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is GeoJSON. +func NewPredExpGeoJSONBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_GEOJSON_BIN} +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is List. +func NewPredExpListBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_LIST_BIN} +} + +// NewPredExpUnknownBin creates a Bin predicate expression which its type is Map. +func NewPredExpMapBin(name string) *predExpBin { + return &predExpBin{name: name, tag: _AS_PREDEXP_MAP_BIN} +} + +func (self *predExpBin) marshaledSize() int { + return self.predExpBase.marshaledSize() + len(self.name) +} + +func (self *predExpBin) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, self.tag, uint32(len(self.name))) + cmd.WriteString(self.name) + return nil +} + +// ---------------- predExp???Var + +type predExpVar struct { + predExpBase + name string + tag uint16 // not marshaled +} + +// String implements the Stringer interface +func (e *predExpVar) String() string { + // FIXME - This is not currently distinguished from a bin. + return e.name +} + +// NewPredExpIntegerVar creates 64 bit integer variable used in list/map iterations. +func NewPredExpIntegerVar(name string) *predExpVar { + return &predExpVar{name: name, tag: _AS_PREDEXP_INTEGER_VAR} +} + +// NewPredExpStringVar creates string variable used in list/map iterations. +func NewPredExpStringVar(name string) *predExpVar { + return &predExpVar{name: name, tag: _AS_PREDEXP_STRING_VAR} +} + +// NewPredExpGeoJSONVar creates GeoJSON variable used in list/map iterations. 
+func NewPredExpGeoJSONVar(name string) *predExpVar { + return &predExpVar{name: name, tag: _AS_PREDEXP_GEOJSON_VAR} +} + +func (self *predExpVar) marshaledSize() int { + return self.predExpBase.marshaledSize() + len(self.name) +} + +func (self *predExpVar) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, self.tag, uint32(len(self.name))) + cmd.WriteString(self.name) + return nil +} + +// ---------------- predExpMD (RecDeviceSize, RecLastUpdate, RecVoidTime) + +type predExpMD struct { + predExpBase + tag uint16 // not marshaled +} + +// String implements the Stringer interface +func (e *predExpMD) String() string { + switch e.tag { + case _AS_PREDEXP_REC_DEVICE_SIZE: + return "rec.DeviceSize" + case _AS_PREDEXP_REC_LAST_UPDATE: + return "rec.LastUpdate" + case _AS_PREDEXP_REC_VOID_TIME: + return "rec.Expiration" + case _AS_PREDEXP_REC_DIGEST_MODULO: + return "rec.DigestModulo" + default: + panic("Invalid Metadata tag.") + } +} + +func (self *predExpMD) marshaledSize() int { + return self.predExpBase.marshaledSize() +} + +func (self *predExpMD) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, self.tag, 0) + return nil +} + +// NewPredExpRecDeviceSize creates record size on disk predicate +func NewPredExpRecDeviceSize() *predExpMD { + return &predExpMD{tag: _AS_PREDEXP_REC_DEVICE_SIZE} +} + +// NewPredExpRecLastUpdate creates record last update predicate +func NewPredExpRecLastUpdate() *predExpMD { + return &predExpMD{tag: _AS_PREDEXP_REC_LAST_UPDATE} +} + +// NewPredExpRecVoidTime creates record expiration time predicate expressed in nanoseconds since 1970-01-01 epoch as 64 bit integer. 
+func NewPredExpRecVoidTime() *predExpMD { + return &predExpMD{tag: _AS_PREDEXP_REC_VOID_TIME} +} + +// ---------------- predExpMDDigestModulo + +type predExpMDDigestModulo struct { + predExpBase + mod int32 +} + +// String implements the Stringer interface +func (e *predExpMDDigestModulo) String() string { + return "rec.DigestModulo" +} + +func (self *predExpMDDigestModulo) marshaledSize() int { + return self.predExpBase.marshaledSize() + 4 +} + +func (self *predExpMDDigestModulo) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_REC_DIGEST_MODULO, 4) + cmd.WriteInt32(self.mod) + return nil +} + +// NewPredExpRecDigestModulo creates a digest modulo record metadata value predicate expression. +// The digest modulo expression assumes the value of 4 bytes of the +// record's key digest modulo as its argument. +// This predicate is available in Aerospike server versions 3.12.1+ +// +// For example, the following sequence of predicate expressions +// selects records that have digest(key) % 3 == 1): +// stmt.SetPredExp( +// NewPredExpRecDigestModulo(3), +// NewPredExpIntegerValue(1), +// NewPredExpIntegerEqual(), +// ) +func NewPredExpRecDigestModulo(mod int32) *predExpMDDigestModulo { + return &predExpMDDigestModulo{mod: mod} +} + +// ---------------- predExpCompare + +type predExpCompare struct { + predExpBase + tag uint16 // not marshaled +} + +// String implements the Stringer interface +func (e *predExpCompare) String() string { + switch e.tag { + case _AS_PREDEXP_INTEGER_EQUAL, _AS_PREDEXP_STRING_EQUAL: + return "=" + case _AS_PREDEXP_INTEGER_UNEQUAL, _AS_PREDEXP_STRING_UNEQUAL: + return "!=" + case _AS_PREDEXP_INTEGER_GREATER: + return ">" + case _AS_PREDEXP_INTEGER_GREATEREQ: + return ">=" + case _AS_PREDEXP_INTEGER_LESS: + return "<" + case _AS_PREDEXP_INTEGER_LESSEQ: + return "<=" + case _AS_PREDEXP_STRING_REGEX: + return "~=" + case _AS_PREDEXP_GEOJSON_CONTAINS: + return "CONTAINS" + case _AS_PREDEXP_GEOJSON_WITHIN: + return "WITHIN" + 
default: + panic(fmt.Sprintf("unexpected predicate tag: %d", e.tag)) + } +} + +func (self *predExpCompare) marshaledSize() int { + return self.predExpBase.marshaledSize() +} + +func (self *predExpCompare) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, self.tag, 0) + return nil +} + +// NewPredExpIntegerEqual creates Equal predicate for integer values +func NewPredExpIntegerEqual() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_EQUAL} +} + +// NewPredExpIntegerUnequal creates NotEqual predicate for integer values +func NewPredExpIntegerUnequal() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_UNEQUAL} +} + +// NewPredExpIntegerGreater creates Greater Than predicate for integer values +func NewPredExpIntegerGreater() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_GREATER} +} + +// NewPredExpIntegerGreaterEq creates Greater Than Or Equal predicate for integer values +func NewPredExpIntegerGreaterEq() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_GREATEREQ} +} + +// NewPredExpIntegerLess creates Less Than predicate for integer values +func NewPredExpIntegerLess() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_LESS} +} + +// NewPredExpIntegerLessEq creates Less Than Or Equal predicate for integer values +func NewPredExpIntegerLessEq() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_INTEGER_LESSEQ} +} + +// NewPredExpStringEqual creates Equal predicate for string values +func NewPredExpStringEqual() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_STRING_EQUAL} +} + +// NewPredExpStringUnequal creates Not Equal predicate for string values +func NewPredExpStringUnequal() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_STRING_UNEQUAL} +} + +// NewPredExpGeoJSONWithin creates Within Region predicate for GeoJSON values +func NewPredExpGeoJSONWithin() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_GEOJSON_WITHIN} +} 
+ +// NewPredExpGeoJSONContains creates Region Contains predicate for GeoJSON values +func NewPredExpGeoJSONContains() *predExpCompare { + return &predExpCompare{tag: _AS_PREDEXP_GEOJSON_CONTAINS} +} + +// ---------------- predExpStringRegex + +type predExpStringRegex struct { + predExpBase + cflags uint32 // cflags +} + +// String implements the Stringer interface +func (e *predExpStringRegex) String() string { + return "regex:" +} + +// NewPredExpStringRegex creates a Regex predicate +func NewPredExpStringRegex(cflags uint32) *predExpStringRegex { + return &predExpStringRegex{cflags: cflags} +} + +func (self *predExpStringRegex) marshaledSize() int { + return self.predExpBase.marshaledSize() + 4 +} + +func (self *predExpStringRegex) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, _AS_PREDEXP_STRING_REGEX, 4) + cmd.WriteUint32(self.cflags) + return nil +} + +// ---------------- predExp???Iterate??? + +type predExpIter struct { + predExpBase + name string + tag uint16 // not marshaled +} + +// String implements the Stringer interface +func (e *predExpIter) String() string { + switch e.tag { + case _AS_PREDEXP_LIST_ITERATE_OR: + return "list_iterate_or using \"" + e.name + "\":" + case _AS_PREDEXP_MAPKEY_ITERATE_OR: + return "mapkey_iterate_or using \"" + e.name + "\":" + case _AS_PREDEXP_MAPVAL_ITERATE_OR: + return "mapval_iterate_or using \"" + e.name + "\":" + case _AS_PREDEXP_LIST_ITERATE_AND: + return "list_iterate_and using \"" + e.name + "\":" + case _AS_PREDEXP_MAPKEY_ITERATE_AND: + return "mapkey_iterate_and using \"" + e.name + "\":" + case _AS_PREDEXP_MAPVAL_ITERATE_AND: + return "mapval_iterate_and using \"" + e.name + "\":" + default: + panic("Invalid Metadata tag.") + } +} + +// NewPredExpListIterateOr creates an Or iterator predicate for list items +func NewPredExpListIterateOr(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_LIST_ITERATE_OR} +} + +// NewPredExpMapKeyIterateOr creates an Or iterator predicate on 
map keys +func NewPredExpMapKeyIterateOr(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_MAPKEY_ITERATE_OR} +} + +// NewPredExpMapValIterateOr creates an Or iterator predicate on map values +func NewPredExpMapValIterateOr(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_MAPVAL_ITERATE_OR} +} + +// NewPredExpListIterateAnd creates an And iterator predicate for list items +func NewPredExpListIterateAnd(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_LIST_ITERATE_AND} +} + +// NewPredExpMapKeyIterateAnd creates an And iterator predicate on map keys +func NewPredExpMapKeyIterateAnd(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_MAPKEY_ITERATE_AND} +} + +// NewPredExpMapKeyIterateAnd creates an And iterator predicate on map values +func NewPredExpMapValIterateAnd(name string) *predExpIter { + return &predExpIter{name: name, tag: _AS_PREDEXP_MAPVAL_ITERATE_AND} +} + +func (self *predExpIter) marshaledSize() int { + return self.predExpBase.marshaledSize() + len(self.name) +} + +func (self *predExpIter) marshal(cmd *baseCommand) error { + self.marshalTL(cmd, self.tag, uint32(len(self.name))) + cmd.WriteString(self.name) + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/priority.go b/vendor/github.com/aerospike/aerospike-client-go/priority.go new file mode 100644 index 00000000000..123d132e995 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/priority.go @@ -0,0 +1,33 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// Priority of operations on database server. +type Priority int + +const ( + + // DEFAULT determines that the server defines the priority. + DEFAULT Priority = iota + + // LOW determines that the server should run the operation in a background thread. + LOW + + // MEDIUM determines that the server should run the operation at medium priority. + MEDIUM + + // HIGH determines that the server should run the operation at the highest priority. + HIGH +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/privilege.go b/vendor/github.com/aerospike/aerospike-client-go/privilege.go new file mode 100644 index 00000000000..f4914c357bf --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/privilege.go @@ -0,0 +1,109 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. 
+ +package aerospike + +import "fmt" + +type privilegeCode string + +// Privilege determines user access granularity. +type Privilege struct { + // Role + Code privilegeCode + + // Namespace determines namespace scope. Apply permission to this namespace only. + // If namespace is zero value, the privilege applies to all namespaces. + Namespace string + + // Set name scope. Apply permission to this set within namespace only. + // If set is zero value, the privilege applies to all sets within namespace. + SetName string +} + +func (p *Privilege) code() int { + switch p.Code { + // User can edit/remove other users. Global scope only. + case UserAdmin: + return 0 + + // User can perform systems administration functions on a database that do not involve user + // administration. Examples include server configuration. + // Global scope only. + case SysAdmin: + return 1 + + // User can perform data administration functions on a database that do not involve user + // administration. Examples include index and user defined function management. + // Global scope only. + case DataAdmin: + return 2 + + // User can read data only. + case Read: + return 10 + + // User can read and write data. + case ReadWrite: + return 11 + + // User can read and write data through user defined functions. + case ReadWriteUDF: + return 12 + } + + panic("invalid role: " + p.Code) +} + +func privilegeFrom(code uint8) privilegeCode { + switch code { + // User can edit/remove other users. Global scope only. + case 0: + return UserAdmin + + // User can perform systems administration functions on a database that do not involve user + // administration. Examples include server configuration. + // Global scope only. + case 1: + return SysAdmin + + // User can perform data administration functions on a database that do not involve user + // administration. Examples include index and user defined function management. + // Global scope only. + case 2: + return DataAdmin + + // User can read data only. 
+ case 10: + return Read + + // User can read and write data. + case 11: + return ReadWrite + + // User can read and write data through user defined functions. + case 12: + return ReadWriteUDF + } + + panic(fmt.Sprintf("invalid privilege code: %v", code)) +} + +func (p *Privilege) canScope() bool { + if p.code() >= 10 { + return true + } + return false +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/query_aggregate_command.go b/vendor/github.com/aerospike/aerospike-client-go/query_aggregate_command.go new file mode 100644 index 00000000000..58d5bfe7098 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/query_aggregate_command.go @@ -0,0 +1,163 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + + . 
 "github.com/aerospike/aerospike-client-go/types"
	Buffer "github.com/aerospike/aerospike-client-go/utils/buffer"
	"github.com/yuin/gopher-lua"
)

// queryAggregateCommand runs a query whose per-record results are streamed
// into inputChan (consumed by a Lua aggregation stage) instead of being
// delivered through the recordset directly.
type queryAggregateCommand struct {
	queryCommand

	// luaInstance is the Lua state used by the aggregation — NOTE(review):
	// not referenced in this file chunk; presumably used by the consumer.
	luaInstance *lua.LState
	// inputChan receives the "SUCCESS" bin value of each response record.
	inputChan chan interface{}
}

// newQueryAggregateCommand wraps a plain query command and marks it so that
// termination is reported as QUERY_TERMINATED.
func newQueryAggregateCommand(node *Node, policy *QueryPolicy, statement *Statement, recordset *Recordset) *queryAggregateCommand {
	cmd := &queryAggregateCommand{
		queryCommand: *newQueryCommand(node, policy, statement, recordset),
	}

	cmd.terminationErrorType = QUERY_TERMINATED

	return cmd
}

// Execute runs the command; any error is also forwarded to the recordset.
func (cmd *queryAggregateCommand) Execute() error {
	// defer cmd.recordset.signalEnd()
	err := cmd.execute(cmd)
	if err != nil {
		cmd.recordset.sendError(err)
	}
	return err
}

// parseRecordResults consumes receiveSize bytes of query responses, one
// record at a time. It returns (true, nil) when the caller should keep
// reading, and (false, nil) on a normal end-of-stream marker.
func (cmd *queryAggregateCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) {
	// Read/parse remaining message bytes one record at a time.
	cmd.dataOffset = 0

	for cmd.dataOffset < receiveSize {
		if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
			err = newNodeError(cmd.node, err)
			return false, err
		}
		// result code lives at byte 5 of the record header
		resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF)

		if resultCode != 0 {
			if resultCode == KEY_NOT_FOUND_ERROR {
				// consume the rest of the input buffer from the socket
				if cmd.dataOffset < receiveSize {
					if err := cmd.readBytes(receiveSize - cmd.dataOffset); err != nil {
						err = newNodeError(cmd.node, err)
						return false, err
					}
				}
				return false, nil
			}
			err := NewAerospikeError(resultCode)
			err = newNodeError(cmd.node, err)
			return false, err
		}

		info3 := int(cmd.dataBuffer[3])

		// If cmd is the end marker of the response, do not proceed further
		if (info3 & _INFO3_LAST) == _INFO3_LAST {
			return false, nil
		}

		// generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
		// expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
		fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
		opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))

		// An aggregation result is always delivered in a single bin.
		if opCount != 1 {
			err := fmt.Errorf("Query aggregate command expects exactly only one bin. Received: %d", opCount)
			err = newNodeError(cmd.node, err)
			return false, err
		}

		_, err := cmd.parseKey(fieldCount)
		if err != nil {
			err = newNodeError(cmd.node, err)
			return false, err
		}

		// if there is a recordset, process the record traditionally
		// otherwise, it is supposed to be a record channel

		// Parse bins.
		var bins BinMap

		for i := 0; i < opCount; i++ {
			// 8-byte op header: size, particle type, bin-name length
			if err := cmd.readBytes(8); err != nil {
				err = newNodeError(cmd.node, err)
				return false, err
			}

			opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
			particleType := int(cmd.dataBuffer[5])
			nameSize := int(cmd.dataBuffer[7])

			if err := cmd.readBytes(nameSize); err != nil {
				err = newNodeError(cmd.node, err)
				return false, err
			}
			name := string(cmd.dataBuffer[:nameSize])

			particleBytesSize := int((opSize - (4 + nameSize)))
			if err = cmd.readBytes(particleBytesSize); err != nil {
				err = newNodeError(cmd.node, err)
				return false, err
			}
			value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
			if err != nil {
				err = newNodeError(cmd.node, err)
				return false, err
			}

			if bins == nil {
				bins = make(BinMap, opCount)
			}
			bins[name] = value
		}

		// The server reports the aggregation outcome in a "SUCCESS" or
		// "FAILURE" bin.
		recs, exists := bins["SUCCESS"]
		if !exists {
			if errStr, exists := bins["FAILURE"]; exists {
				err = NewAerospikeError(QUERY_GENERIC, errStr.(string))
				return false, err
			} else {
				err = NewAerospikeError(QUERY_GENERIC, fmt.Sprintf("QueryAggregate's expected result was not returned. Received: %v", bins))
				return false, err
			}
		}

		// If the channel is full and it blocks, we don't want this command to
		// block forever, or panic in case the channel is closed in the meantime.
		select {
		// send back the result on the async channel
		case cmd.inputChan <- recs:
		case <-cmd.recordset.cancelled:
			return false, NewAerospikeError(QUERY_TERMINATED)
		}
	}

	return true, nil
}
diff --git a/vendor/github.com/aerospike/aerospike-client-go/query_command.go b/vendor/github.com/aerospike/aerospike-client-go/query_command.go
new file mode 100644
index 00000000000..7b46c08dedb
--- /dev/null
+++ b/vendor/github.com/aerospike/aerospike-client-go/query_command.go
@@ -0,0 +1,52 @@
// Copyright 2013-2017 Aerospike, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aerospike

// queryCommand is the base command shared by the query variants.
type queryCommand struct {
	baseMultiCommand

	policy    *QueryPolicy
	statement *Statement
}

func newQueryCommand(node *Node, policy *QueryPolicy, statement *Statement, recordset *Recordset) *queryCommand {
	return &queryCommand{
		baseMultiCommand: *newMultiCommand(node, recordset),
		policy:           policy,
		statement:        statement,
	}
}

func (cmd *queryCommand) getPolicy(ifc command) Policy {
	return cmd.policy
}

func (cmd *queryCommand) writeBuffer(ifc command) (err error) {
	return cmd.setQuery(cmd.policy, cmd.statement, false)
}

func (cmd *queryCommand) parseResult(ifc command, conn *Connection) error {
	return cmd.baseMultiCommand.parseResult(ifc, conn)
}

// Execute will run the query.
+func (cmd *queryCommand) Execute() error { + defer cmd.recordset.signalEnd() + err := cmd.execute(cmd) + if err != nil { + cmd.recordset.sendError(err) + } + return err +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/query_objects_command.go b/vendor/github.com/aerospike/aerospike-client-go/query_objects_command.go new file mode 100644 index 00000000000..6b8f0118048 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/query_objects_command.go @@ -0,0 +1,40 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import . 
"github.com/aerospike/aerospike-client-go/types" + +type queryObjectsCommand struct { + queryCommand +} + +func newQueryObjectsCommand(node *Node, policy *QueryPolicy, statement *Statement, recordset *Recordset) *queryObjectsCommand { + cmd := &queryObjectsCommand{ + queryCommand: *newQueryCommand(node, policy, statement, recordset), + } + + cmd.terminationErrorType = QUERY_TERMINATED + + return cmd +} + +func (cmd *queryObjectsCommand) Execute() error { + defer cmd.recordset.signalEnd() + err := cmd.execute(cmd) + if err != nil { + cmd.recordset.sendError(err) + } + return err +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/query_policy.go b/vendor/github.com/aerospike/aerospike-client-go/query_policy.go new file mode 100644 index 00000000000..d7ddac3c81a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/query_policy.go @@ -0,0 +1,27 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// QueryPolicy encapsulates parameters for policy attributes used in query operations. +type QueryPolicy struct { + *MultiPolicy +} + +// NewQueryPolicy generates a new QueryPolicy instance with default values. 
+func NewQueryPolicy() *QueryPolicy { + return &QueryPolicy{ + MultiPolicy: NewMultiPolicy(), + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/query_record_command.go b/vendor/github.com/aerospike/aerospike-client-go/query_record_command.go new file mode 100644 index 00000000000..414d1eb56e0 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/query_record_command.go @@ -0,0 +1,31 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import . "github.com/aerospike/aerospike-client-go/types" + +type queryRecordCommand struct { + queryCommand +} + +func newQueryRecordCommand(node *Node, policy *QueryPolicy, statement *Statement, recordset *Recordset) *queryRecordCommand { + cmd := &queryRecordCommand{ + queryCommand: *newQueryCommand(node, policy, statement, recordset), + } + + cmd.terminationErrorType = QUERY_TERMINATED + + return cmd +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/read_command.go b/vendor/github.com/aerospike/aerospike-client-go/read_command.go new file mode 100644 index 00000000000..3f7922da360 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/read_command.go @@ -0,0 +1,210 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aerospike

import (
	"reflect"

	. "github.com/aerospike/aerospike-client-go/logger"

	. "github.com/aerospike/aerospike-client-go/types"
	Buffer "github.com/aerospike/aerospike-client-go/utils/buffer"
)

// readCommand fetches a single record (or unmarshals it into an object).
type readCommand struct {
	singleCommand

	policy   *BasePolicy
	binNames []string
	record   *Record

	// pointer to the object that's going to be unmarshalled
	object *reflect.Value
}

// this method uses reflection.
// Will not be set if performance flag is passed for the build.
var objectParser func(
	cmd *readCommand,
	opCount int,
	fieldCount int,
	generation uint32,
	expiration uint32,
) error

// newReadCommand builds a read for key, optionally restricted to binNames.
func newReadCommand(cluster *Cluster, policy *BasePolicy, key *Key, binNames []string) readCommand {
	return readCommand{
		singleCommand: newSingleCommand(cluster, key),
		binNames:      binNames,
		policy:        policy,
	}
}

func (cmd *readCommand) getPolicy(ifc command) Policy {
	return cmd.policy
}

func (cmd *readCommand) writeBuffer(ifc command) error {
	return cmd.setRead(cmd.policy, cmd.key, cmd.binNames)
}

// getNode picks a node for the key's partition per the replica policy.
func (cmd *readCommand) getNode(ifc command) (*Node, error) {
	return cmd.cluster.getReadNode(&cmd.partition, cmd.policy.ReplicaPolicy)
}

// parseResult reads and decodes the server response: header first, then the
// remaining payload, then either a Record or (via objectParser) an object.
func (cmd *readCommand) parseResult(ifc command, conn *Connection) error {
	// Read header.
	_, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE))
	if err != nil {
		Logger.Warn("parse result error: " + err.Error())
		return err
	}

	// A number of these are commented out because we just don't care enough to read
	// that section of the header. If we do care, uncomment and check!
	sz := Buffer.BytesToInt64(cmd.dataBuffer, 0)

	// Validate header to make sure we are at the beginning of a message
	if err := cmd.validateHeader(sz); err != nil {
		return err
	}

	// fixed offsets into the message header
	headerLength := int(cmd.dataBuffer[8])
	resultCode := ResultCode(cmd.dataBuffer[13] & 0xFF)
	generation := Buffer.BytesToUint32(cmd.dataBuffer, 14)
	expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 18))
	fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 26)) // almost certainly 0
	opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 28))
	// low 48 bits of sz are the total message size
	receiveSize := int((sz & 0xFFFFFFFFFFFF) - int64(headerLength))

	// Read remaining message bytes.
	if receiveSize > 0 {
		if err = cmd.sizeBufferSz(receiveSize); err != nil {
			return err
		}
		_, err = conn.Read(cmd.dataBuffer, receiveSize)
		if err != nil {
			Logger.Warn("parse result error: " + err.Error())
			return err
		}

	}

	if resultCode != 0 {
		// A missing key is not an error for plain record reads (record stays nil).
		if resultCode == KEY_NOT_FOUND_ERROR && cmd.object == nil {
			return nil
		}

		if resultCode == UDF_BAD_RESPONSE {
			cmd.record, _ = cmd.parseRecord(opCount, fieldCount, generation, expiration)
			err := cmd.handleUdfError(resultCode)
			Logger.Warn("UDF execution error: " + err.Error())
			return err
		}

		return NewAerospikeError(resultCode)
	}

	if cmd.object == nil {
		if opCount == 0 {
			// data Bin was not returned
			cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration)
			return nil
		}

		cmd.record, err = cmd.parseRecord(opCount, fieldCount, generation, expiration)
		if err != nil {
			return err
		}
	} else if objectParser != nil {
		if err := objectParser(cmd, opCount, fieldCount, generation, expiration); err != nil {
			return err
		}
} + + return nil +} + +func (cmd *readCommand) handleUdfError(resultCode ResultCode) error { + if ret, exists := cmd.record.Bins["FAILURE"]; exists { + return NewAerospikeError(resultCode, ret.(string)) + } + return NewAerospikeError(resultCode) +} + +func (cmd *readCommand) parseRecord( + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) (*Record, error) { + var bins BinMap + receiveOffset := 0 + + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. + // Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer) + if fieldCount > 0 { + // Just skip over all the fields + for i := 0; i < fieldCount; i++ { + // Logger.Debug("%d", receiveOffset) + fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset)) + receiveOffset += (4 + fieldSize) + } + } + + if opCount > 0 { + bins = make(BinMap, opCount) + } + + for i := 0; i < opCount; i++ { + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset)) + particleType := int(cmd.dataBuffer[receiveOffset+5]) + nameSize := int(cmd.dataBuffer[receiveOffset+7]) + name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize]) + receiveOffset += 4 + 4 + nameSize + + particleBytesSize := int(opSize - (4 + nameSize)) + value, _ := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize) + receiveOffset += particleBytesSize + + if bins == nil { + bins = make(BinMap, opCount) + } + + // for operate list command results + if prev, exists := bins[name]; exists { + if res, ok := prev.([]interface{}); ok { + // List already exists. Add to it. + bins[name] = append(res, value) + } else { + // Make a list to store all values. 
+ bins[name] = []interface{}{prev, value} + } + } else { + bins[name] = value + } + } + + return newRecord(cmd.node, cmd.key, bins, generation, expiration), nil +} + +func (cmd *readCommand) GetRecord() *Record { + return cmd.record +} + +func (cmd *readCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/read_command_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/read_command_reflect.go new file mode 100644 index 00000000000..cb9ea1c8906 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/read_command_reflect.go @@ -0,0 +1,451 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "errors" + "math" + "reflect" + "strings" + "time" + + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// if this file is included in the build, it will include this method +func init() { + objectParser = parseObject +} + +func parseObject( + cmd *readCommand, + opCount int, + fieldCount int, + generation uint32, + expiration uint32, +) error { + receiveOffset := 0 + + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. 
+ // Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer) + if fieldCount > 0 { + // Just skip over all the fields + for i := 0; i < fieldCount; i++ { + // Logger.Debug("%d", receiveOffset) + fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset)) + receiveOffset += (4 + fieldSize) + } + } + + if opCount > 0 { + rv := *cmd.object + + if rv.Kind() != reflect.Ptr { + return errors.New("Invalid type for result object. It should be of type Struct Pointer.") + } + rv = rv.Elem() + + if !rv.CanAddr() { + return errors.New("Invalid type for object. It should be addressable (a pointer)") + } + + if rv.Kind() != reflect.Struct { + return errors.New("Invalid type for object. It should be a pointer to a struct.") + } + + // find the name based on tag mapping + iobj := indirect(rv) + mappings := objectMappings.getMapping(iobj.Type()) + + if err := setObjectMetaFields(iobj, TTL(expiration), generation); err != nil { + return err + } + + for i := 0; i < opCount; i++ { + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset)) + particleType := int(cmd.dataBuffer[receiveOffset+5]) + nameSize := int(cmd.dataBuffer[receiveOffset+7]) + name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize]) + receiveOffset += 4 + 4 + nameSize + + particleBytesSize := int(opSize - (4 + nameSize)) + value, _ := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize) + if err := setObjectField(mappings, iobj, name, value); err != nil { + return err + } + + receiveOffset += particleBytesSize + } + } + + return nil +} + +func setObjectMetaFields(obj reflect.Value, ttl, gen uint32) error { + // find the name based on tag mapping + iobj := indirect(obj) + + ttlMap, genMap := objectMappings.getMetaMappings(iobj.Type()) + + if ttlMap != nil { + for i := range ttlMap { + f := iobj.FieldByName(ttlMap[i]) + if err := setValue(f, ttl); err != nil { + return err + } + } + } + + if genMap != nil { + for i := range genMap { + 
f := iobj.FieldByName(genMap[i]) + if err := setValue(f, gen); err != nil { + return err + } + } + } + + return nil +} + +func setObjectField(mappings map[string]string, obj reflect.Value, fieldName string, value interface{}) error { + if value == nil { + return nil + } + + if name, exists := mappings[fieldName]; exists { + fieldName = name + } + f := obj.FieldByName(fieldName) + return setValue(f, value) +} + +func setValue(f reflect.Value, value interface{}) error { + // find the name based on tag mapping + if f.CanSet() { + if value == nil { + if f.IsValid() && !f.IsNil() { + f.Set(reflect.ValueOf(value)) + } + return nil + } + + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(int64(value.(int))) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch v := value.(type) { + case uint8: + f.SetUint(uint64(v)) + case uint16: + f.SetUint(uint64(v)) + case uint32: + f.SetUint(uint64(v)) + case uint64: + f.SetUint(uint64(v)) + case uint: + f.SetUint(uint64(v)) + default: + f.SetUint(uint64(value.(int))) + } + case reflect.Float64, reflect.Float32: + // if value has returned as a float + if fv, ok := value.(float64); ok { + f.SetFloat(fv) + } else { + // otherwise it is an old float64<->int64 marshalling type cast which needs to be set as int + f.SetFloat(float64(math.Float64frombits(uint64(value.(int))))) + } + case reflect.String: + rv := reflect.ValueOf(value.(string)) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Bool: + f.SetBool(value.(int) == 1) + case reflect.Interface: + if value != nil { + f.Set(reflect.ValueOf(value)) + } + case reflect.Ptr: + switch f.Type().Elem().Kind() { + case reflect.Int: + tempV := int(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Uint: + tempV := uint(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() 
!= f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.String: + tempV := string(value.(string)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Int8: + tempV := int8(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Uint8: + tempV := uint8(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Int16: + tempV := int16(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Uint16: + tempV := uint16(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Int32: + tempV := int32(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Uint32: + tempV := uint32(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Int64: + tempV := int64(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Uint64: + tempV := uint64(value.(int)) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Float64: + // it is possible that the value is an integer set in the field + // via the old float<->int64 type cast + var tempV float64 + if fv, ok := value.(float64); ok { + tempV = fv + } else { + tempV = math.Float64frombits(uint64(value.(int))) + } + + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Bool: + tempV := bool(value.(int) == 1) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) 
+ } + f.Set(rv) + case reflect.Float32: + // it is possible that the value is an integer set in the field + // via the old float<->int64 type cast + var tempV64 float64 + if fv, ok := value.(float64); ok { + tempV64 = fv + } else { + tempV64 = math.Float64frombits(uint64(value.(int))) + } + + tempV := float32(tempV64) + rv := reflect.ValueOf(&tempV) + if rv.Type() != f.Type() { + rv = rv.Convert(f.Type()) + } + f.Set(rv) + case reflect.Interface: + f.Set(reflect.ValueOf(&value)) + case reflect.Struct: + // support time.Time + if f.Type().Elem().PkgPath() == "time" && f.Type().Elem().Name() == "Time" { + tm := time.Unix(0, int64(value.(int))) + f.Set(reflect.ValueOf(&tm)) + break + } else { + valMap := value.(map[interface{}]interface{}) + // iterate over struct fields and recursively fill them up + if valMap != nil { + newObjPtr := f + if f.IsNil() { + newObjPtr = reflect.New(f.Type().Elem()) + } + theStruct := newObjPtr.Elem().Type() + numFields := newObjPtr.Elem().NumField() + for i := 0; i < numFields; i++ { + // skip unexported fields + fld := theStruct.Field(i) + if fld.PkgPath != "" { + continue + } + + alias := fld.Name + tag := strings.Trim(fld.Tag.Get(aerospikeTag), " ") + if tag != "" { + alias = tag + } + + if valMap[alias] != nil { + if err := setValue(reflect.Indirect(newObjPtr).FieldByName(fld.Name), valMap[alias]); err != nil { + return err + } + } + } + + // set the field + f.Set(newObjPtr) + } + } + } // switch ptr + case reflect.Slice, reflect.Array: + // BLOBs come back as []byte + theArray := reflect.ValueOf(value) + + if f.Kind() == reflect.Slice { + if f.IsNil() { + f.Set(reflect.MakeSlice(reflect.SliceOf(f.Type().Elem()), theArray.Len(), theArray.Len())) + } else if f.Len() < theArray.Len() { + count := theArray.Len() - f.Len() + f = reflect.AppendSlice(f, reflect.MakeSlice(reflect.SliceOf(f.Type().Elem()), count, count)) + } + } + + for i := 0; i < theArray.Len(); i++ { + if err := setValue(f.Index(i), theArray.Index(i).Interface()); err != 
nil { + return err + } + } + case reflect.Map: + emptyStruct := reflect.ValueOf(struct{}{}) + theMap := value.(map[interface{}]interface{}) + if theMap != nil { + newMap := reflect.MakeMap(f.Type()) + var newKey, newVal reflect.Value + for key, elem := range theMap { + if key != nil { + newKey = reflect.ValueOf(key) + } else { + newKey = reflect.Zero(f.Type().Key()) + } + + if newKey.Type() != f.Type().Key() { + newKey = newKey.Convert(f.Type().Key()) + } + + if elem != nil { + newVal = reflect.ValueOf(elem) + } else { + newVal = reflect.Zero(f.Type().Elem()) + } + + if newVal.Type() != f.Type().Elem() { + switch newVal.Kind() { + case reflect.Map, reflect.Slice, reflect.Array: + newVal = reflect.New(f.Type().Elem()) + if err := setValue(newVal.Elem(), elem); err != nil { + return err + } + newVal = reflect.Indirect(newVal) + default: + newVal = newVal.Convert(f.Type().Elem()) + } + } + + if newVal.Kind() == reflect.Map && newVal.Len() == 0 && newMap.Type().Elem().Kind() == emptyStruct.Type().Kind() { + if newMap.Type().Elem().NumField() == 0 { + newMap.SetMapIndex(newKey, emptyStruct) + } else { + return errors.New("Map value type is struct{}, but data returned from database is a non-empty map[interface{}]interface{}") + } + } else { + newMap.SetMapIndex(newKey, newVal) + } + } + f.Set(newMap) + } + + case reflect.Struct: + // support time.Time + if f.Type().PkgPath() == "time" && f.Type().Name() == "Time" { + f.Set(reflect.ValueOf(time.Unix(0, int64(value.(int))))) + break + } + + valMap := value.(map[interface{}]interface{}) + // iterate over struct fields and recursively fill them up + typeOfT := f.Type() + numFields := f.NumField() + for i := 0; i < numFields; i++ { + fld := typeOfT.Field(i) + // skip unexported fields + if fld.PkgPath != "" { + continue + } + + alias := fld.Name + tag := strings.Trim(fld.Tag.Get(aerospikeTag), " ") + if tag != "" { + alias = tag + } + + if valMap[alias] != nil { + if err := setValue(f.FieldByName(fld.Name), valMap[alias]); 
err != nil { + return err + } + } + } + + // set the field + f.Set(f) + } + } + + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/read_header_command.go b/vendor/github.com/aerospike/aerospike-client-go/read_header_command.go new file mode 100644 index 00000000000..69c3b2ba0b5 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/read_header_command.go @@ -0,0 +1,88 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type readHeaderCommand struct { + singleCommand + + policy *BasePolicy + record *Record +} + +func newReadHeaderCommand(cluster *Cluster, policy *BasePolicy, key *Key) *readHeaderCommand { + newReadHeaderCmd := &readHeaderCommand{ + singleCommand: newSingleCommand(cluster, key), + policy: policy, + } + + return newReadHeaderCmd +} + +func (cmd *readHeaderCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *readHeaderCommand) writeBuffer(ifc command) error { + return cmd.setReadHeader(cmd.policy, cmd.key) +} + +func (cmd *readHeaderCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getReadNode(&cmd.partition, cmd.policy.ReplicaPolicy) +} + +func (cmd *readHeaderCommand) parseResult(ifc command, conn *Connection) error { + // Read header. 
+ if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil { + return err + } + + header := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(header); err != nil { + return err + } + + resultCode := cmd.dataBuffer[13] & 0xFF + + if resultCode == 0 { + generation := Buffer.BytesToUint32(cmd.dataBuffer, 14) + expiration := TTL(Buffer.BytesToUint32(cmd.dataBuffer, 18)) + cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration) + } else { + if ResultCode(resultCode) == KEY_NOT_FOUND_ERROR { + cmd.record = nil + } else { + return NewAerospikeError(ResultCode(resultCode)) + } + } + if err := cmd.emptySocket(conn); err != nil { + return err + } + return nil +} + +func (cmd *readHeaderCommand) GetRecord() *Record { + return cmd.record +} + +func (cmd *readHeaderCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/record.go b/vendor/github.com/aerospike/aerospike-client-go/record.go new file mode 100644 index 00000000000..f3ef7f652f0 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/record.go @@ -0,0 +1,61 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "fmt" + +// Record is the container struct for database records. +// Records are equivalent to rows. 
+type Record struct { + // Key is the record's key. + // Might be empty, or may only consist of digest value. + Key *Key + + // Node from which the Record is originating from. + Node *Node + + // Bins is the map of requested name/value bins. + Bins BinMap + + // Generation shows record modification count. + Generation uint32 + + // Expiration is TTL (Time-To-Live). + // Number of seconds until record expires. + Expiration uint32 +} + +func newRecord(node *Node, key *Key, bins BinMap, generation, expiration uint32) *Record { + r := &Record{ + Node: node, + Key: key, + Bins: bins, + Generation: generation, + Expiration: expiration, + } + + // always assign a map of length zero if Bins is nil + if r.Bins == nil { + r.Bins = make(BinMap, 0) + } + + return r +} + +// String implements the Stringer interface. +// Returns string representation of record. +func (rc *Record) String() string { + return fmt.Sprintf("%s %v", rc.Key, rc.Bins) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/record_exists_action.go b/vendor/github.com/aerospike/aerospike-client-go/record_exists_action.go new file mode 100644 index 00000000000..5b8cdbb450a --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/record_exists_action.go @@ -0,0 +1,45 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// RecordExistsAction determines how to handle writes when +// the record already exists. 
+type RecordExistsAction int + +const ( + + // UPDATE means: Create or update record. + // Merge write command bins with existing bins. + UPDATE RecordExistsAction = iota + + // UPDATE_ONLY means: Update record only. Fail if record does not exist. + // Merge write command bins with existing bins. + UPDATE_ONLY + + // REPLACE means: Create or replace record. + // Delete existing bins not referenced by write command bins. + // Supported by Aerospike 2 server versions >= 2.7.5 and + // Aerospike 3 server versions >= 3.1.6. + REPLACE + + // REPLACE_ONLY means: Replace record only. Fail if record does not exist. + // Delete existing bins not referenced by write command bins. + // Supported by Aerospike 2 server versions >= 2.7.5 and + // Aerospike 3 server versions >= 3.1.6. + REPLACE_ONLY + + // CREATE_ONLY means: Create only. Fail if record exists. + CREATE_ONLY +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/recordset.go b/vendor/github.com/aerospike/aerospike-client-go/recordset.go new file mode 100644 index 00000000000..97ee6d6b3f2 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/recordset.go @@ -0,0 +1,246 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + "reflect" + "runtime" + "sync" + + . "github.com/aerospike/aerospike-client-go/types" + . 
"github.com/aerospike/aerospike-client-go/types/atomic" +) + +type Result struct { + Record *Record + Err error +} + +// String implements the Stringer interface +func (res *Result) String() string { + if res.Record != nil { + return fmt.Sprintf("%v", res.Record) + } + return fmt.Sprintf("%v", res.Err) +} + +// Objectset encapsulates the result of Scan and Query commands. +type objectset struct { + // a reference to the object channel to close on end signal + objChan reflect.Value + + // Errors is a channel on which all errors will be sent back. + // NOTE: Do not use Errors directly. Range on channel returned by Results() instead. + // This field is deprecated and will be unexported in the future + Errors chan error + + wgGoroutines sync.WaitGroup + goroutines *AtomicInt + + closed, active *AtomicBool + cancelled chan struct{} + + chanLock sync.Mutex + + taskId uint64 +} + +// TaskId returns the transactionId/jobId sent to the server for this recordset. +func (os *objectset) TaskId() uint64 { + return os.taskId +} + +// Recordset encapsulates the result of Scan and Query commands. +type Recordset struct { + objectset + + // Records is a channel on which the resulting records will be sent back. + // NOTE: Do not use Records directly. Range on channel returned by Results() instead. + // Will be unexported in the future + Records chan *Record +} + +// makes sure the recordset is closed eventually, even if it is not consumed +func recordsetFinalizer(rs *Recordset) { + rs.Close() +} + +// newObjectset generates a new RecordSet instance. 
+func newObjectset(objChan reflect.Value, goroutines int, taskId uint64) *objectset { + + if objChan.Kind() != reflect.Chan || + objChan.Type().Elem().Kind() != reflect.Ptr || + objChan.Type().Elem().Elem().Kind() != reflect.Struct { + panic("Scan/Query object channels should be of type `chan *T`") + } + + rs := &objectset{ + objChan: objChan, + Errors: make(chan error, goroutines), + active: NewAtomicBool(true), + closed: NewAtomicBool(false), + goroutines: NewAtomicInt(goroutines), + cancelled: make(chan struct{}), + taskId: taskId, + } + rs.wgGoroutines.Add(goroutines) + + return rs +} + +// newRecordset generates a new RecordSet instance. +func newRecordset(recSize, goroutines int, taskId uint64) *Recordset { + var nilChan chan *struct{} + + rs := &Recordset{ + Records: make(chan *Record, recSize), + objectset: *newObjectset(reflect.ValueOf(nilChan), goroutines, taskId), + } + + runtime.SetFinalizer(rs, recordsetFinalizer) + return rs +} + +// IsActive returns true if the operation hasn't been finished or cancelled. +func (rcs *Recordset) IsActive() bool { + return rcs.active.Get() +} + +// Read reads the next record from the Recordset. If the Recordset has been +// closed, it returns ErrRecordsetClosed. +func (rcs *Recordset) Read() (record *Record, err error) { + var ok bool + +L: + select { + case record, ok = <-rcs.Records: + if !ok { + err = ErrRecordsetClosed + } + case err = <-rcs.Errors: + if err == nil { + // if err == nil, it means the Errors chan has been closed + // we should not return nil as an error, so we should listen + // to other chans again to determine either cancellation, + // or normal EOR + goto L + } + } + + return record, err +} + +// Results returns a new receive-only channel with the results of the Scan/Query. +// This is a more idiomatic approach to the iterator pattern in getting the +// results back from the recordset, and doesn't require the user to write the +// ugly select in their code. 
+// Result contains a Record and an error reference. +// +// Example: +// +// recordset, err := client.ScanAll(nil, namespace, set) +// handleError(err) +// for res := range recordset.Results() { +// if res.Err != nil { +// // handle error here +// } else { +// // process record here +// fmt.Println(res.Record.Bins) +// } +// } +func (rcs *Recordset) Results() <-chan *Result { + recCap := cap(rcs.Records) + if recCap < 1 { + recCap = 1 + } + res := make(chan *Result, recCap) + + select { + case <-rcs.cancelled: + // Bail early and give the caller a channel for nothing -- it's + // functionally wasted memory, but the caller did something + // after close, so it's their own doing. + close(res) + return res + default: + } + + go func(cancelled <-chan struct{}) { + defer close(res) + for { + record, err := rcs.Read() + if err == ErrRecordsetClosed { + return + } + + result := &Result{Record: record, Err: err} + select { + case <-cancelled: + return + case res <- result: + + } + } + }(rcs.cancelled) + + return res +} + +// Close all streams from different nodes. A successful close return nil, +// subsequent calls to the method will return ErrRecordsetClosed. 
+func (rcs *Recordset) Close() error { + // do it only once + if !rcs.closed.CompareAndToggle(false) { + return ErrRecordsetClosed + } + + // mark the recordset as inactive + rcs.active.Set(false) + + close(rcs.cancelled) + + // wait till all goroutines are done, and signalEnd is called by the scan command + rcs.wgGoroutines.Wait() + + return nil +} + +func (rcs *Recordset) signalEnd() { + rcs.wgGoroutines.Done() + if rcs.goroutines.DecrementAndGet() == 0 { + // mark the recordset as inactive + rcs.active.Set(false) + + rcs.chanLock.Lock() + defer rcs.chanLock.Unlock() + + if rcs.Records != nil { + close(rcs.Records) + } else if rcs.objChan.IsValid() { + rcs.objChan.Close() + } + + close(rcs.Errors) + } +} + +func (rcs *Recordset) sendError(err error) { + rcs.chanLock.Lock() + defer rcs.chanLock.Unlock() + if rcs.IsActive() { + rcs.Errors <- err + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/replica_policy.go b/vendor/github.com/aerospike/aerospike-client-go/replica_policy.go new file mode 100644 index 00000000000..647ccfd0a6e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/replica_policy.go @@ -0,0 +1,37 @@ +/* + * Copyright 2013-2017 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package aerospike + +// ReplicaPolicy defines type of node partition targeted by read commands. 
+type ReplicaPolicy int + +const ( + // MASTER reads from node containing key's master partition. + // This is the default behavior. + MASTER ReplicaPolicy = iota + + // MASTER_PROLES Distributes reads across nodes containing key's master and replicated partitions + // in round-robin fashion. This option requires ClientPolicy.RequestProleReplicas + // to be enabled in order to function properly. + MASTER_PROLES + + // Distribute reads across all nodes in cluster in round-robin fashion. + // This option is useful when the replication factor equals the number + // of nodes in the cluster and the overhead of requesting proles is not desired. + RANDOM +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/role.go b/vendor/github.com/aerospike/aerospike-client-go/role.go new file mode 100644 index 00000000000..b8a65fde20b --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/role.go @@ -0,0 +1,44 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +package aerospike + +// Role allows granular access to database entities for users. +type Role struct { + Name string + + Privileges []Privilege +} + +// Pre-defined user roles. +const ( + // UserAdmin allows to manages users and their roles. 
+ UserAdmin privilegeCode = "user-admin" + + // SysAdmin allows to manage indexes, user defined functions and server configuration. + SysAdmin privilegeCode = "sys-admin" + + // DataAdmin allows to manage indices and user defined functions. + DataAdmin privilegeCode = "data-admin" + + // ReadWriteUDF allows read, write and UDF transactions with the database. + ReadWriteUDF privilegeCode = "read-write-udf" + + // ReadWrite allows read and write transactions with the database. + ReadWrite privilegeCode = "read-write" + + // Read allows read transactions with the database. + Read privilegeCode = "read" +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/scan_command.go b/vendor/github.com/aerospike/aerospike-client-go/scan_command.go new file mode 100644 index 00000000000..0fceb0bd35e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/scan_command.go @@ -0,0 +1,71 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import . 
"github.com/aerospike/aerospike-client-go/types" + +type scanCommand struct { + baseMultiCommand + + policy *ScanPolicy + namespace string + setName string + binNames []string + taskId uint64 +} + +func newScanCommand( + node *Node, + policy *ScanPolicy, + namespace string, + setName string, + binNames []string, + recordset *Recordset, + taskId uint64, +) *scanCommand { + cmd := &scanCommand{ + baseMultiCommand: *newMultiCommand(node, recordset), + policy: policy, + namespace: namespace, + setName: setName, + binNames: binNames, + taskId: taskId, + } + + cmd.terminationErrorType = SCAN_TERMINATED + + return cmd +} + +func (cmd *scanCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *scanCommand) writeBuffer(ifc command) error { + return cmd.setScan(cmd.policy, &cmd.namespace, &cmd.setName, cmd.binNames, cmd.taskId) +} + +func (cmd *scanCommand) parseResult(ifc command, conn *Connection) error { + return cmd.baseMultiCommand.parseResult(cmd, conn) +} + +func (cmd *scanCommand) Execute() error { + defer cmd.recordset.signalEnd() + err := cmd.execute(cmd) + if err != nil { + cmd.recordset.sendError(err) + } + return err +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/scan_objects_command.go b/vendor/github.com/aerospike/aerospike-client-go/scan_objects_command.go new file mode 100644 index 00000000000..f3d4a6feb1e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/scan_objects_command.go @@ -0,0 +1,73 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import . "github.com/aerospike/aerospike-client-go/types" + +// . "github.com/aerospike/aerospike-client-go/types/atomic" + +type scanObjectsCommand struct { + baseMultiCommand + + policy *ScanPolicy + namespace string + setName string + binNames []string + taskId uint64 +} + +func newScanObjectsCommand( + node *Node, + policy *ScanPolicy, + namespace string, + setName string, + binNames []string, + recordset *Recordset, + taskId uint64, +) *scanObjectsCommand { + cmd := &scanObjectsCommand{ + baseMultiCommand: *newMultiCommand(node, recordset), + policy: policy, + namespace: namespace, + setName: setName, + binNames: binNames, + taskId: taskId, + } + + cmd.terminationErrorType = SCAN_TERMINATED + + return cmd +} + +func (cmd *scanObjectsCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *scanObjectsCommand) writeBuffer(ifc command) error { + return cmd.setScan(cmd.policy, &cmd.namespace, &cmd.setName, cmd.binNames, cmd.taskId) +} + +func (cmd *scanObjectsCommand) parseResult(ifc command, conn *Connection) error { + return cmd.baseMultiCommand.parseResult(ifc, conn) +} + +func (cmd *scanObjectsCommand) Execute() error { + defer cmd.recordset.signalEnd() + err := cmd.execute(cmd) + if err != nil { + cmd.recordset.sendError(err) + } + return err +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/scan_policy.go b/vendor/github.com/aerospike/aerospike-client-go/scan_policy.go new file mode 100644 index 00000000000..6167c81f54f --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/scan_policy.go @@ -0,0 +1,60 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "time" + +// ScanPolicy encapsulates parameters used in scan operations. +type ScanPolicy struct { + *MultiPolicy + + // ScanPercent determines percent of data to scan. + // Valid integer range is 1 to 100. + // Default is 100. + ScanPercent int //= 100; + + // ServerSocketTimeout defines maximum time that the server will before droping an idle socket. + // Zero means there is no socket timeout. + // Default is 10 seconds. + ServerSocketTimeout time.Duration //= 10 seconds + + // ConcurrentNodes determines how to issue scan requests (in parallel or sequentially). + ConcurrentNodes bool //= true; + + // Indicates if bin data is retrieved. If false, only record digests are retrieved. + IncludeBinData bool //= true; + + // Include large data type bin values in addition to large data type bin names. + // If false, LDT bin names will be returned, but LDT bin values will be empty. + // If true, LDT bin names and the entire LDT bin values will be returned. + // Warning: LDT values may consume huge of amounts of memory depending on LDT size. + IncludeLDT bool + + // FailOnClusterChange determines scan termination if cluster is in fluctuating state. + FailOnClusterChange bool +} + +// NewScanPolicy creates a new ScanPolicy instance with default values. 
+func NewScanPolicy() *ScanPolicy { + return &ScanPolicy{ + MultiPolicy: NewMultiPolicy(), + ScanPercent: 100, + ServerSocketTimeout: 10 * time.Second, + ConcurrentNodes: true, + IncludeBinData: true, + IncludeLDT: false, + FailOnClusterChange: true, + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/server_command.go b/vendor/github.com/aerospike/aerospike-client-go/server_command.go new file mode 100644 index 00000000000..17b29819420 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/server_command.go @@ -0,0 +1,90 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + // "fmt" + + // . "github.com/aerospike/aerospike-client-go/logger" + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type serverCommand struct { + queryCommand +} + +func newServerCommand(node *Node, policy *QueryPolicy, statement *Statement) *serverCommand { + return &serverCommand{ + queryCommand: *newQueryCommand(node, policy, statement, nil), + } +} + +func (cmd *serverCommand) parseRecordResults(ifc command, receiveSize int) (bool, error) { + // Server commands (Query/Execute UDF) should only send back a return code. + // Keep parsing logic to empty socket buffer just in case server does + // send records back. 
+ cmd.dataOffset = 0 + + for cmd.dataOffset < receiveSize { + if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil { + return false, err + } + resultCode := ResultCode(cmd.dataBuffer[5] & 0xFF) + + if resultCode != 0 { + if resultCode == KEY_NOT_FOUND_ERROR { + return false, nil + } + return false, NewAerospikeError(resultCode) + } + + info3 := int(cmd.dataBuffer[3]) + + // If cmd is the end marker of the response, do not proceed further + if (info3 & _INFO3_LAST) == _INFO3_LAST { + return false, nil + } + + fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18)) + opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20)) + + if _, err := cmd.parseKey(fieldCount); err != nil { + return false, err + } + + for i := 0; i < opCount; i++ { + if err := cmd.readBytes(8); err != nil { + return false, err + } + opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0)) + nameSize := int(cmd.dataBuffer[7]) + + if err := cmd.readBytes(nameSize); err != nil { + return false, err + } + + particleBytesSize := int((opSize - (4 + nameSize))) + if err := cmd.readBytes(particleBytesSize); err != nil { + return false, err + } + } + } + return true, nil +} + +func (cmd *serverCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/single_command.go b/vendor/github.com/aerospike/aerospike-client-go/single_command.go new file mode 100644 index 00000000000..18feaabd51e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/single_command.go @@ -0,0 +1,65 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "time" + + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type singleCommand struct { + baseCommand + + cluster *Cluster + key *Key + partition Partition +} + +func newSingleCommand(cluster *Cluster, key *Key) singleCommand { + return singleCommand{ + baseCommand: baseCommand{}, + cluster: cluster, + key: key, + partition: newPartitionByKey(key), + } +} + +func (cmd *singleCommand) getConnection(timeout time.Duration) (*Connection, error) { + return cmd.node.getConnectionWithHint(timeout, cmd.key.digest[0]) +} + +func (cmd *singleCommand) putConnection(conn *Connection) { + cmd.node.putConnectionWithHint(conn, cmd.key.digest[0]) +} + +func (cmd *singleCommand) emptySocket(conn *Connection) error { + // There should not be any more bytes. + // Empty the socket to be safe. + sz := Buffer.BytesToInt64(cmd.dataBuffer, 0) + headerLength := cmd.dataBuffer[8] + receiveSize := int(sz&0xFFFFFFFFFFFF) - int(headerLength) + + // Read remaining message bytes. + if receiveSize > 0 { + if err := cmd.sizeBufferSz(receiveSize); err != nil { + return err + } + if _, err := conn.Read(cmd.dataBuffer, receiveSize); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/statement.go b/vendor/github.com/aerospike/aerospike-client-go/statement.go new file mode 100644 index 00000000000..d965c042ebc --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/statement.go @@ -0,0 +1,133 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import xornd "github.com/aerospike/aerospike-client-go/types/rand" + +// Statement encapsulates query statement parameters. +type Statement struct { + // Namespace determines query Namespace + Namespace string + + // SetName determines query Set name (Optional) + SetName string + + // IndexName determines query index name (Optional) + // If not set, the server will determine the index from the filter's bin name. + IndexName string + + // BinNames detemines bin names (optional) + BinNames []string + + // Filters determine query filters (Optional) + // Currently, only one filter is allowed by the server on a secondary index lookup. + // If multiple filters are necessary, see QueryFilter example for a workaround. + // QueryFilter demonstrates how to add additional filters in an user-defined + // aggregation function. + Filters []*Filter + + packageName string + functionName string + functionArgs []Value + + // Ordered list of predicate expressions + predExps []predExp + + // TaskId determines query task id. (Optional) + TaskId uint64 + + // determines if the query should return data + returnData bool +} + +// NewStatement initializes a new Statement instance. 
+func NewStatement(ns string, set string, binNames ...string) *Statement { + return &Statement{ + Namespace: ns, + SetName: set, + BinNames: binNames, + returnData: true, + TaskId: uint64(xornd.Int64()), + } +} + +// Addfilter adds a filter to the statement. +// Aerospike Server currently only supports using a single filter per statement/query. +func (stmt *Statement) Addfilter(filter *Filter) error { + stmt.Filters = append(stmt.Filters, filter) + + return nil +} + +// SetPredExp sets low-level predicate expressions for the statement in postfix notation. +// Supported only by Aerospike Server v3.12+. +// Predicate expression filters are applied on the query results on the server. +// Predicate expression filters may occur on any bin in the record. +// To learn how to use this API, consult predexp_test.go file. +// +// Postfix notation is described here: http://wiki.c2.com/?PostfixNotation +// +// Example: (c >= 11 and c <= 20) or (d > 3 and (d < 5) +// +// stmt.SetPredExp( +// NewPredExpIntegerValue(11), +// NewPredExpIntegerBin("c"), +// NewPredExpIntegerGreaterEq(), +// NewPredExpIntegerValue(20), +// NewPredExpIntegerBin("c"), +// NewPredExpIntegerLessEq(), +// NewPredExpAnd(2), +// NewPredExpIntegerValue(3), +// NewPredExpIntegerBin("d"), +// NewPredExpIntegerGreater(), +// NewPredExpIntegerValue(5), +// NewPredExpIntegerBin("d"), +// NewPredExpIntegerLess(), +// NewPredExpAnd(2), +// NewPredExpOr(2) +// ); +// +// // Record last update time > 2017-01-15 +// stmt.SetPredExp( +// NewIntegerValue(time.Date(2017, 0, 15, 0, 0, 0, 0, time.UTC).UnixNano()), +// NewPredExpLastUpdate(), +// NewPredExpIntegerGreater(), +// ); +func (stmt *Statement) SetPredExp(predexp ...predExp) error { + stmt.predExps = predexp + return nil +} + +// SetAggregateFunction sets aggregation function parameters. +// This function will be called on both the server +// and client for each selected item. 
+func (stmt *Statement) SetAggregateFunction(packageName string, functionName string, functionArgs []Value, returnData bool) { + stmt.packageName = packageName + stmt.functionName = functionName + stmt.functionArgs = functionArgs + stmt.returnData = returnData +} + +// IsScan determines is the Statement is a full namespace/set scan or a selective Query. +func (stmt *Statement) IsScan() bool { + return len(stmt.Filters) == 0 +} + +// Always set the taskId client-side to a non-zero random value +func (stmt *Statement) setTaskId() { + for stmt.TaskId == 0 { + stmt.TaskId = uint64(xornd.Int64()) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/task.go b/vendor/github.com/aerospike/aerospike-client-go/task.go new file mode 100644 index 00000000000..da22a61eefe --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/task.go @@ -0,0 +1,78 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "time" +) + +// Task interface defines methods for asynchronous tasks. +type Task interface { + IsDone() (bool, error) + + onComplete(ifc Task) chan error + OnComplete() chan error +} + +// baseTask is used to poll for server task completion. +type baseTask struct { + retries int + cluster *Cluster + done bool + onCompleteChan chan error +} + +// newTask initializes task with fields needed to query server nodes. 
+func newTask(cluster *Cluster, done bool) *baseTask { + return &baseTask{ + cluster: cluster, + done: done, + } +} + +// Wait for asynchronous task to complete using default sleep interval. +func (btsk *baseTask) onComplete(ifc Task) chan error { + // create the channel if it doesn't exist yet + if btsk.onCompleteChan != nil { + // channel and goroutine already exists; just return the channel + return btsk.onCompleteChan + } + + btsk.onCompleteChan = make(chan error) + + // goroutine will loop every until IsDone() returns true or error + const interval = 1 * time.Second + go func() { + // always close the channel on return + defer close(btsk.onCompleteChan) + + for { + select { + case <-time.After(interval): + done, err := ifc.IsDone() + btsk.retries++ + if err != nil { + btsk.onCompleteChan <- err + return + } else if done { + btsk.onCompleteChan <- nil + return + } + } // select + } // for + }() + + return btsk.onCompleteChan +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/task_drop_index.go b/vendor/github.com/aerospike/aerospike-client-go/task_drop_index.go new file mode 100644 index 00000000000..90a33e45ffa --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/task_drop_index.go @@ -0,0 +1,64 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "strings" + +// DropIndexTask is used to poll for long running create index completion. 
+type DropIndexTask struct { + *baseTask + + namespace string + indexName string +} + +// NewDropIndexTask initializes a task with fields needed to query server nodes. +func NewDropIndexTask(cluster *Cluster, namespace string, indexName string) *DropIndexTask { + return &DropIndexTask{ + baseTask: newTask(cluster, false), + namespace: namespace, + indexName: indexName, + } +} + +// IsDone queries all nodes for task completion status. +func (tski *DropIndexTask) IsDone() (bool, error) { + command := "sindex/" + tski.namespace + "/" + tski.indexName + nodes := tski.cluster.GetNodes() + complete := false + + for _, node := range nodes { + responseMap, err := node.RequestInfo(command) + if err != nil { + return false, err + } + + for _, response := range responseMap { + if strings.Contains(response, "FAIL:201") { + complete = true + continue + } + + return false, nil + } + } + return complete, nil +} + +// OnComplete returns a channel that will be closed as soon as the task is finished. +// If an error is encountered during operation, an error will be sent on the channel. +func (tski *DropIndexTask) OnComplete() chan error { + return tski.onComplete(tski) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/task_index.go b/vendor/github.com/aerospike/aerospike-client-go/task_index.go new file mode 100644 index 00000000000..7d408bb9c89 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/task_index.go @@ -0,0 +1,82 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "regexp" + "strconv" + "strings" +) + +// IndexTask is used to poll for long running create index completion. +type IndexTask struct { + *baseTask + + namespace string + indexName string +} + +// NewIndexTask initializes a task with fields needed to query server nodes. +func NewIndexTask(cluster *Cluster, namespace string, indexName string) *IndexTask { + return &IndexTask{ + baseTask: newTask(cluster, false), + namespace: namespace, + indexName: indexName, + } +} + +// IsDone queries all nodes for task completion status. +func (tski *IndexTask) IsDone() (bool, error) { + command := "sindex/" + tski.namespace + "/" + tski.indexName + nodes := tski.cluster.GetNodes() + complete := false + + r := regexp.MustCompile(`\.*load_pct=(\d+)\.*`) + + for _, node := range nodes { + responseMap, err := node.RequestInfo(command) + if err != nil { + return false, err + } + + for _, response := range responseMap { + find := "load_pct=" + index := strings.Index(response, find) + + if index < 0 { + if tski.retries > 2 { + complete = true + } + continue + } + + matchRes := r.FindStringSubmatch(response) + // we know it exists and is a valid number + pct, _ := strconv.Atoi(matchRes[1]) + + if pct >= 0 && pct < 100 { + return false, nil + } + complete = true + } + } + return complete, nil +} + +// OnComplete returns a channel that will be closed as soon as the task is finished. +// If an error is encountered during operation, an error will be sent on the channel. 
+func (tski *IndexTask) OnComplete() chan error { + return tski.onComplete(tski) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/task_register.go b/vendor/github.com/aerospike/aerospike-client-go/task_register.go new file mode 100644 index 00000000000..2ce4e349c5c --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/task_register.go @@ -0,0 +1,65 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "strings" +) + +// RegisterTask is used to poll for UDF registration completion. +type RegisterTask struct { + *baseTask + + packageName string +} + +// NewRegisterTask initializes a RegisterTask with fields needed to query server nodes. +func NewRegisterTask(cluster *Cluster, packageName string) *RegisterTask { + return &RegisterTask{ + baseTask: newTask(cluster, false), + packageName: packageName, + } +} + +// IsDone will query all nodes for task completion status. 
+func (tskr *RegisterTask) IsDone() (bool, error) { + command := "udf-list" + nodes := tskr.cluster.GetNodes() + done := false + + for _, node := range nodes { + responseMap, err := node.RequestInfo(command) + if err != nil { + return false, err + } + + for _, response := range responseMap { + find := "filename=" + tskr.packageName + index := strings.Index(response, find) + + if index < 0 { + return false, nil + } + done = true + } + } + return done, nil +} + +// OnComplete returns a channel that will be closed as soon as the task is finished. +// If an error is encountered during operation, an error will be sent on the channel. +func (tskr *RegisterTask) OnComplete() chan error { + return tskr.onComplete(tskr) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/task_remove.go b/vendor/github.com/aerospike/aerospike-client-go/task_remove.go new file mode 100644 index 00000000000..6b0537c56f7 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/task_remove.go @@ -0,0 +1,65 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "strings" +) + +// RemoveTask is used to poll for UDF registration completion. +type RemoveTask struct { + *baseTask + + packageName string +} + +// NewRemoveTask initializes a RemoveTask with fields needed to query server nodes. 
+func NewRemoveTask(cluster *Cluster, packageName string) *RemoveTask { + return &RemoveTask{ + baseTask: newTask(cluster, false), + packageName: packageName, + } +} + +// IsDone will query all nodes for task completion status. +func (tskr *RemoveTask) IsDone() (bool, error) { + command := "udf-list" + nodes := tskr.cluster.GetNodes() + done := false + + for _, node := range nodes { + responseMap, err := node.RequestInfo(command) + if err != nil { + return false, err + } + + for _, response := range responseMap { + find := "filename=" + tskr.packageName + index := strings.Index(response, find) + + if index >= 0 { + return false, nil + } + done = true + } + } + return done, nil +} + +// OnComplete returns a channel that will be closed as soon as the task is finished. +// If an error is encountered during operation, an error will be sent on the channel. +func (tskr *RemoveTask) OnComplete() chan error { + return tskr.onComplete(tskr) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/touch_command.go b/vendor/github.com/aerospike/aerospike-client-go/touch_command.go new file mode 100644 index 00000000000..1f9cb6f6365 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/touch_command.go @@ -0,0 +1,78 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . 
"github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// guarantee touchCommand implements command interface +var _ command = &touchCommand{} + +type touchCommand struct { + singleCommand + + policy *WritePolicy +} + +func newTouchCommand(cluster *Cluster, policy *WritePolicy, key *Key) *touchCommand { + newTouchCmd := &touchCommand{ + singleCommand: newSingleCommand(cluster, key), + policy: policy, + } + + return newTouchCmd +} + +func (cmd *touchCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *touchCommand) writeBuffer(ifc command) error { + return cmd.setTouch(cmd.policy, cmd.key) +} + +func (cmd *touchCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getMasterNode(&cmd.partition) +} + +func (cmd *touchCommand) parseResult(ifc command, conn *Connection) error { + // Read header. + if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil { + return err + } + + header := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(header); err != nil { + return err + } + + resultCode := cmd.dataBuffer[13] & 0xFF + + if resultCode != 0 { + return NewAerospikeError(ResultCode(resultCode)) + } + if err := cmd.emptySocket(conn); err != nil { + return err + } + return nil +} + +func (cmd *touchCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/atomic/array.go b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/array.go new file mode 100644 index 00000000000..ba63ee21016 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/array.go @@ -0,0 +1,72 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// AtomicArray implements a fixed-width array with atomic semantics.
type AtomicArray struct {
	items  []interface{}
	length int
	mutex  sync.RWMutex
}

// NewAtomicArray generates a new AtomicArray instance of the given length.
func NewAtomicArray(length int) *AtomicArray {
	return &AtomicArray{
		length: length,
		items:  make([]interface{}, length),
	}
}

// Get atomically retrieves an element from the Array.
// If idx is out of range, it will return nil.
func (aa *AtomicArray) Get(idx int) interface{} {
	// Do not lock if not needed; length is immutable after construction.
	if idx < 0 || idx >= aa.length {
		return nil
	}

	aa.mutex.RLock()
	res := aa.items[idx]
	aa.mutex.RUnlock()
	return res
}

// Set atomically sets an element in the Array.
// If idx is out of range, it will return an error.
func (aa *AtomicArray) Set(idx int, node interface{}) error {
	// Do not lock if not needed. The message now also covers negative
	// indexes, which the old "larger than array size" wording did not.
	if idx < 0 || idx >= aa.length {
		return fmt.Errorf("index %d is out of range (array size %d)", idx, aa.length)
	}

	aa.mutex.Lock()
	aa.items[idx] = node
	aa.mutex.Unlock()
	return nil
}

// Length returns the array size.
func (aa *AtomicArray) Length() int {
	aa.mutex.RLock()
	res := aa.length
	aa.mutex.RUnlock()
	return res
}

// AtomicBool implements a synchronized boolean value.
type AtomicBool struct {
	val int32 // 0 == false, non-zero == true
}

// NewAtomicBool generates a new AtomicBool instance.
func NewAtomicBool(value bool) *AtomicBool {
	var i int32
	if value {
		i = 1
	}
	return &AtomicBool{val: i}
}

// Get atomically retrieves the boolean value.
func (ab *AtomicBool) Get() bool {
	return atomic.LoadInt32(&ab.val) != 0
}

// Set atomically sets the boolean value.
func (ab *AtomicBool) Set(newVal bool) {
	var i int32
	if newVal {
		i = 1
	}
	atomic.StoreInt32(&ab.val, i)
}

// Or atomically applies an OR operation to the boolean value and returns
// the resulting value.
func (ab *AtomicBool) Or(newVal bool) bool {
	if !newVal {
		return ab.Get()
	}
	atomic.StoreInt32(&ab.val, 1)
	return true
}
+func (ab *AtomicBool) CompareAndToggle(expect bool) bool { + updated := 1 + if expect { + updated = 0 + } + return atomic.CompareAndSwapInt32(&ab.val, int32(1-updated), int32(updated)) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/atomic/int.go b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/int.go new file mode 100644 index 00000000000..5bb78d9a9f3 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/int.go @@ -0,0 +1,83 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomic + +import "sync/atomic" + +// AtomicInt implements an int value with atomic semantics +type AtomicInt struct { + val int64 +} + +// NewAtomicInt generates a newVal AtomicInt instance. +func NewAtomicInt(value int) *AtomicInt { + return &AtomicInt{ + val: int64(value), + } +} + +// AddAndGet atomically adds the given value to the current value. +func (ai *AtomicInt) AddAndGet(delta int) int { + return int(atomic.AddInt64(&ai.val, int64(delta))) +} + +// CompareAndSet atomically sets the value to the given updated value if the current value == expected value. +// Returns true if the expectation was met +func (ai *AtomicInt) CompareAndSet(expect int, update int) bool { + return atomic.CompareAndSwapInt64(&ai.val, int64(expect), int64(update)) +} + +// DecrementAndGet atomically decrements current value by one and returns the result. 
+func (ai *AtomicInt) DecrementAndGet() int { + return int(atomic.AddInt64(&ai.val, -1)) +} + +// Get atomically retrieves the current value. +func (ai *AtomicInt) Get() int { + return int(atomic.LoadInt64(&ai.val)) +} + +// GetAndAdd atomically adds the given delta to the current value and returns the result. +func (ai *AtomicInt) GetAndAdd(delta int) int { + newVal := atomic.AddInt64(&ai.val, int64(delta)) + return int(newVal - int64(delta)) +} + +// GetAndDecrement atomically decrements the current value by one and returns the result. +func (ai *AtomicInt) GetAndDecrement() int { + newVal := atomic.AddInt64(&ai.val, -1) + return int(newVal + 1) +} + +// GetAndIncrement atomically increments current value by one and returns the result. +func (ai *AtomicInt) GetAndIncrement() int { + newVal := atomic.AddInt64(&ai.val, 1) + return int(newVal - 1) +} + +// GetAndSet atomically sets current value to the given value and returns the old value. +func (ai *AtomicInt) GetAndSet(newValue int) int { + return int(atomic.SwapInt64(&ai.val, int64(newValue))) +} + +// IncrementAndGet atomically increments current value by one and returns the result. +func (ai *AtomicInt) IncrementAndGet() int { + return int(atomic.AddInt64(&ai.val, 1)) +} + +// Set atomically sets current value to the given value. +func (ai *AtomicInt) Set(newValue int) { + atomic.StoreInt64(&ai.val, int64(newValue)) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/atomic/queue.go b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/queue.go new file mode 100644 index 00000000000..49df045a8c9 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/queue.go @@ -0,0 +1,81 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomic + +import "sync" + +// AtomicQueue is a non-blocking FIFO queue. +// If the queue is empty, nil is returned. +// if the queue is full, offer will return false +type AtomicQueue struct { + head, tail uint32 + data []interface{} + size uint32 + wrapped bool + mutex sync.Mutex +} + +// NewQueue creates a new queue with initial size. +func NewAtomicQueue(size int) *AtomicQueue { + if size <= 0 { + panic("Queue size cannot be less than 1") + } + + return &AtomicQueue{ + wrapped: false, + data: make([]interface{}, uint32(size)), + size: uint32(size), + } +} + +// Offer adds an item to the queue unless the queue is full. +// In case the queue is full, the item will not be added to the queue +// and false will be returned +func (q *AtomicQueue) Offer(obj interface{}) bool { + q.mutex.Lock() + + // make sure queue is not full + if q.tail == q.head && q.wrapped { + q.mutex.Unlock() + return false + } + + if q.head+1 == q.size { + q.wrapped = true + } + + q.head = (q.head + 1) % q.size + q.data[q.head] = obj + q.mutex.Unlock() + return true +} + +// Poll removes and returns an item from the queue. +// If the queue is empty, nil will be returned. 
+func (q *AtomicQueue) Poll() (res interface{}) { + q.mutex.Lock() + + // if queue is not empty + if q.wrapped || (q.tail != q.head) { + if q.tail+1 == q.size { + q.wrapped = false + } + q.tail = (q.tail + 1) % q.size + res = q.data[q.tail] + } + + q.mutex.Unlock() + return res +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/atomic/sync_val.go b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/sync_val.go new file mode 100644 index 00000000000..650a27a7412 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/atomic/sync_val.go @@ -0,0 +1,51 @@ +package atomic + +import "sync" + +type SyncVal struct { + val interface{} + lock sync.RWMutex +} + +// NewSyncVal creates a new instance of SyncVal +func NewSyncVal(val interface{}) *SyncVal { + return &SyncVal{val: val} +} + +// Set updates the value of SyncVal with the passed argument +func (sv *SyncVal) Set(val interface{}) { + sv.lock.Lock() + sv.val = val + sv.lock.Unlock() +} + +// Get returns the value inside the SyncVal +func (sv *SyncVal) Get() interface{} { + sv.lock.RLock() + val := sv.val + sv.lock.RUnlock() + return val +} + +// GetSyncedVia returns the value returned by the function f. +func (sv *SyncVal) GetSyncedVia(f func(interface{}) (interface{}, error)) (interface{}, error) { + sv.lock.RLock() + defer sv.lock.RUnlock() + + val, err := f(sv.val) + return val, err +} + +// Update gets a function and passes the value of SyncVal to it. +// If the resulting err is nil, it will update the value of SyncVal. +// It will return the resulting error to the caller. 
+func (sv *SyncVal) Update(f func(interface{}) (interface{}, error)) error { + sv.lock.Lock() + defer sv.lock.Unlock() + + val, err := f(sv.val) + if err == nil { + sv.val = val + } + return err +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/buffer_pool.go b/vendor/github.com/aerospike/aerospike-client-go/types/buffer_pool.go new file mode 100644 index 00000000000..3855ec1165c --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/buffer_pool.go @@ -0,0 +1,75 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "sync" + +// BufferPool implements a specialized buffer pool. +// Pool size will be limited, and each buffer size will be +// constrained to the init and max buffer sizes. +type BufferPool struct { + pool [][]byte + poolSize int + + pos int64 + + maxBufSize int + initBufSize int + + mutex sync.Mutex +} + +// NewBufferPool creates a new buffer pool. +// New buffers will be created with size and capacity of initBufferSize. +// If cap(buffer) is larger than maxBufferSize when it is put back in the buffer, +// it will be thrown away. This will prevent unwanted memory bloat and +// set a deterministic maximum-size for the pool which will not be exceeded. 
+func NewBufferPool(poolSize, initBufferSize, maxBufferSize int) *BufferPool { + return &BufferPool{ + pool: make([][]byte, poolSize), + pos: -1, + poolSize: poolSize, + maxBufSize: maxBufferSize, + initBufSize: initBufferSize, + } +} + +// Get returns a buffer from the pool. If pool is empty, a new buffer of +// size initBufSize will be created and returned. +func (bp *BufferPool) Get() (res []byte) { + bp.mutex.Lock() + if bp.pos >= 0 { + res = bp.pool[bp.pos] + bp.pos-- + } else { + res = make([]byte, bp.initBufSize, bp.initBufSize) + } + + bp.mutex.Unlock() + return res +} + +// Put will put the buffer back in the pool, unless cap(buf) is bigger than +// initBufSize, in which case it will be thrown away +func (bp *BufferPool) Put(buf []byte) { + if len(buf) <= bp.maxBufSize { + bp.mutex.Lock() + if bp.pos < int64(bp.poolSize-1) { + bp.pos++ + bp.pool[bp.pos] = buf + } + bp.mutex.Unlock() + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/epoc.go b/vendor/github.com/aerospike/aerospike-client-go/types/epoc.go new file mode 100644 index 00000000000..af403e11024 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/epoc.go @@ -0,0 +1,22 @@ +package types + +import ( + "math" + "time" +) + +const ( + // citrusleaf epoc: Jan 01 2010 00:00:00 GMT + CITRUSLEAF_EPOCH = 1262304000 +) + +// TTL converts an Expiration time from citrusleaf epoc to TTL in seconds. 
+func TTL(secsFromCitrusLeafEpoc uint32) uint32 { + switch secsFromCitrusLeafEpoc { + // don't convert magic values + case 0: // when set to don't expire, this value is returned + return math.MaxUint32 + default: + return uint32(int64(CITRUSLEAF_EPOCH+secsFromCitrusLeafEpoc) - time.Now().Unix()) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/error.go b/vendor/github.com/aerospike/aerospike-client-go/types/error.go new file mode 100644 index 00000000000..0c2ab50380e --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/error.go @@ -0,0 +1,50 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "strings" +) + +// AerospikeError implements error interface for aerospike specific errors. +// All errors returning from the library are of this type. +// Errors resulting from Go's stdlib are not translated to this type, unless +// they are a net.Timeout error. +type AerospikeError struct { + error + + resultCode ResultCode +} + +// ResultCode returns the ResultCode from AerospikeError object. +func (ase AerospikeError) ResultCode() ResultCode { + return ase.resultCode +} + +// New AerospikeError generates a new AerospikeError instance. +// If no message is provided, the result code will be translated into the default +// error message automatically. 
+func NewAerospikeError(code ResultCode, messages ...string) error { + if len(messages) == 0 { + messages = []string{ResultCodeToString(code)} + } + + err := errors.New(strings.Join(messages, " ")) + return AerospikeError{error: err, resultCode: code} +} + +var ErrRecordsetClosed = NewAerospikeError(RECORDSET_CLOSED, "Recordset has already been closed.") +var ErrConnectionPoolEmpty = NewAerospikeError(NO_AVAILABLE_CONNECTIONS_TO_NODE) diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/message.go b/vendor/github.com/aerospike/aerospike-client-go/types/message.go new file mode 100644 index 00000000000..cba6ff4054c --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/message.go @@ -0,0 +1,98 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +type messageType uint8 + +const ( + MSG_HEADER_SIZE = 8 //sizeof(MessageHeader) + + MSG_INFO messageType = 1 + MSG_MESSAGE = 3 +) + +type MessageHeader struct { + Version uint8 + Type uint8 + DataLen [6]byte +} + +// Length returns the length of the message +func (msg *MessageHeader) Length() int64 { + return msgLenFromBytes(msg.DataLen) +} + +type Message struct { + MessageHeader + + Data []byte +} + +// NewMessage generates a new Message instance. 
+func NewMessage(mtype messageType, data []byte) *Message { + return &Message{ + MessageHeader: MessageHeader{ + Version: uint8(2), + Type: uint8(mtype), + DataLen: msgLenToBytes(int64(len(data))), + }, + Data: data, + } +} + +const maxAllowedBufferSize = 1024 * 1024 + +// Resize changes the internal buffer size for the message. +func (msg *Message) Resize(newSize int64) error { + if newSize > maxAllowedBufferSize || newSize < 0 { + return fmt.Errorf("Requested new buffer size is invalid. Requested: %d, allowed: 0..%d", newSize, maxAllowedBufferSize) + } + if int64(len(msg.Data)) == newSize { + return nil + } + msg.Data = make([]byte, newSize) + return nil +} + +// Serialize returns a byte slice containing the message. +func (msg *Message) Serialize() []byte { + msg.DataLen = msgLenToBytes(int64(len(msg.Data))) + buf := bytes.NewBuffer([]byte{}) + binary.Write(buf, binary.BigEndian, msg.MessageHeader) + binary.Write(buf, binary.BigEndian, msg.Data[:]) + + return buf.Bytes() +} + +func msgLenFromBytes(buf [6]byte) int64 { + nbytes := append([]byte{0, 0}, buf[:]...) + DataLen := binary.BigEndian.Uint64(nbytes) + return int64(DataLen) +} + +// converts a +func msgLenToBytes(DataLen int64) [6]byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(DataLen)) + res := [6]byte{} + copy(res[:], b[2:]) + return res +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/particle_type/particle_type.go b/vendor/github.com/aerospike/aerospike-client-go/types/particle_type/particle_type.go new file mode 100644 index 00000000000..ad29ec8e3b2 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/particle_type/particle_type.go @@ -0,0 +1,42 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package particleType + +const ( + // Server particle types. Unsupported types are commented out. + NULL = 0 + INTEGER = 1 + FLOAT = 2 + STRING = 3 + BLOB = 4 + // TIMESTAMP = 5 + DIGEST = 6 + // JBLOB = 7 + // CSHARP_BLOB = 8 + // PYTHON_BLOB = 9 + // RUBY_BLOB = 10 + // PHP_BLOB = 11 + // ERLANG_BLOB = 12 + // SEGMENT_POINTER = 13 + // RTA_LIST = 14 + // RTA_DICT = 15 + // RTA_APPEND_DICT = 16 + // RTA_APPEND_LIST = 17 + // LUA_BLOB = 18 + MAP = 19 + LIST = 20 + LDT = 21 + GEOJSON = 23 +) diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/pool.go b/vendor/github.com/aerospike/aerospike-client-go/types/pool.go new file mode 100644 index 00000000000..f4f8497fa09 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/pool.go @@ -0,0 +1,72 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + . "github.com/aerospike/aerospike-client-go/types/atomic" +) + +// Pool implements a general purpose fixed-size pool. 
+type Pool struct { + pool *AtomicQueue + + // New will create a new object + New func(params ...interface{}) interface{} + // IsUsable checks if the object polled from the pool is still fresh and usable + IsUsable func(obj interface{}, params ...interface{}) bool + // CanReturn checkes if the object is eligible to go back to the pool + CanReturn func(obj interface{}) bool + // Finalize will be called when an object is not eligible to go back to the pool. + // Usable to close connections, file handles, ... + Finalize func(obj interface{}) +} + +// NewPool creates a new fixed size pool. +func NewPool(poolSize int) *Pool { + return &Pool{ + pool: NewAtomicQueue(poolSize), + } +} + +// Get returns an element from the pool. +// If the pool is empty, or the returned element is not usable, +// nil or the result of the New function will be returned +func (bp *Pool) Get(params ...interface{}) interface{} { + res := bp.pool.Poll() + if res == nil || (bp.IsUsable != nil && !bp.IsUsable(res, params...)) { + // not usable, so finalize + if res != nil && bp.Finalize != nil { + bp.Finalize(res) + } + + if bp.New != nil { + res = bp.New(params...) + } + } + + return res +} + +// Put will add the elem back to the pool, unless the pool is full. +func (bp *Pool) Put(obj interface{}) { + finalize := true + if bp.CanReturn == nil || bp.CanReturn(obj) { + finalize = !bp.pool.Offer(obj) + } + + if finalize && bp.Finalize != nil { + bp.Finalize(obj) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/rand/xor_shift128.go b/vendor/github.com/aerospike/aerospike-client-go/types/rand/xor_shift128.go new file mode 100644 index 00000000000..0d46648b341 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/rand/xor_shift128.go @@ -0,0 +1,72 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rand + +import ( + "encoding/binary" + "sync/atomic" + "time" +) + +const ( + poolSize = 512 +) + +// random number generator pool +var pool = make([]*Xor128Rand, poolSize) +var pos uint64 + +func init() { + for i := range pool { + pool[i] = NewXorRand() + // to guarantee a different number on less accurate system clocks + time.Sleep(time.Microsecond + 31*time.Nanosecond) + } +} + +func Int64() int64 { + apos := int(atomic.AddUint64(&pos, 1) % poolSize) + return pool[apos].Int64() +} + +type Xor128Rand struct { + src [2]uint64 +} + +func NewXorRand() *Xor128Rand { + t := time.Now().UnixNano() + return &Xor128Rand{[2]uint64{uint64(t), uint64(t)}} +} + +func (r *Xor128Rand) Int64() int64 { + return int64(r.Uint64()) +} + +func (r *Xor128Rand) Uint64() uint64 { + s1 := r.src[0] + s0 := r.src[1] + r.src[0] = s0 + s1 ^= s1 << 23 + r.src[1] = (s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26)) + return r.src[1] + s0 +} + +func (r *Xor128Rand) Read(p []byte) (n int, err error) { + l := len(p) / 8 + for i := 0; i < l; i += 8 { + binary.PutUvarint(p[i:], r.Uint64()) + } + return len(p), nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/result_code.go b/vendor/github.com/aerospike/aerospike-client-go/types/result_code.go new file mode 100644 index 00000000000..c266aac0655 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/result_code.go @@ -0,0 +1,507 @@ +// Copyright 2013-2017 Aerospike, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "fmt" + +// ResultCode signifies the database operation error codes. +// The positive numbers align with the server side file proto.h. +type ResultCode int + +const ( + // Cluster Name does not match the ClientPolicy.ClusterName value. + CLUSTER_NAME_MISMATCH_ERROR ResultCode = -10 + + // Recordset has already been closed or cancelled + RECORDSET_CLOSED ResultCode = -9 + + // There were no connections available to the node in the pool, and the pool was limited + NO_AVAILABLE_CONNECTIONS_TO_NODE ResultCode = -8 + + // Data type is not supported by aerospike server. + TYPE_NOT_SUPPORTED ResultCode = -7 + + // Info Command was rejected by the server. + COMMAND_REJECTED ResultCode = -6 + + // Query was terminated by user. + QUERY_TERMINATED ResultCode = -5 + + // Scan was terminated by user. + SCAN_TERMINATED ResultCode = -4 + + // Chosen node is not currently active. + INVALID_NODE_ERROR ResultCode = -3 + + // Client parse error. + PARSE_ERROR ResultCode = -2 + + // Client serialization error. + SERIALIZE_ERROR ResultCode = -1 + + // Operation was successful. + OK ResultCode = 0 + + // Unknown server failure. + SERVER_ERROR ResultCode = 1 + + // On retrieving, touching or replacing a record that doesn't exist. + KEY_NOT_FOUND_ERROR ResultCode = 2 + + // On modifying a record with unexpected generation. 
+ GENERATION_ERROR ResultCode = 3 + + // Bad parameter(s) were passed in database operation call. + PARAMETER_ERROR ResultCode = 4 + + // On create-only (write unique) operations on a record that already + // exists. + KEY_EXISTS_ERROR ResultCode = 5 + + // On create-only (write unique) operations on a bin that already + // exists. + BIN_EXISTS_ERROR ResultCode = 6 + + // Expected cluster ID was not received. + CLUSTER_KEY_MISMATCH ResultCode = 7 + + // Server has run out of memory. + SERVER_MEM_ERROR ResultCode = 8 + + // Client or server has timed out. + TIMEOUT ResultCode = 9 + + // XDS product is not available. + NO_XDS ResultCode = 10 + + // Server is not accepting requests. + SERVER_NOT_AVAILABLE ResultCode = 11 + + // Operation is not supported with configured bin type (single-bin or + // multi-bin). + BIN_TYPE_ERROR ResultCode = 12 + + // Record size exceeds limit. + RECORD_TOO_BIG ResultCode = 13 + + // Too many concurrent operations on the same record. + KEY_BUSY ResultCode = 14 + + // Scan aborted by server. + SCAN_ABORT ResultCode = 15 + + // Unsupported Server Feature (e.g. Scan + UDF) + UNSUPPORTED_FEATURE ResultCode = 16 + + // Specified bin name does not exist in record. + BIN_NOT_FOUND ResultCode = 17 + + // Device not keeping up with writes. + DEVICE_OVERLOAD ResultCode = 18 + + // Key type mismatch. + KEY_MISMATCH ResultCode = 19 + + // Invalid namespace. + INVALID_NAMESPACE ResultCode = 20 + + // Bin name length greater than 14 characters. + BIN_NAME_TOO_LONG ResultCode = 21 + + // Operation not allowed at this time. + FAIL_FORBIDDEN ResultCode = 22 + + // Element Not Found in CDT + FAIL_ELEMENT_NOT_FOUND ResultCode = 23 + + // Element Already Exists in CDT + FAIL_ELEMENT_EXISTS ResultCode = 24 + + // There are no more records left for query. + QUERY_END ResultCode = 50 + + // Security type not supported by connected server. + SECURITY_NOT_SUPPORTED ResultCode = 51 + + // Administration command is invalid. 
+ SECURITY_NOT_ENABLED ResultCode = 52 + + // Administration field is invalid. + SECURITY_SCHEME_NOT_SUPPORTED ResultCode = 53 + + // Administration command is invalid. + INVALID_COMMAND ResultCode = 54 + + // Administration field is invalid. + INVALID_FIELD ResultCode = 55 + + // Security protocol not followed. + ILLEGAL_STATE ResultCode = 56 + + // User name is invalid. + INVALID_USER ResultCode = 60 + + // User was previously created. + USER_ALREADY_EXISTS ResultCode = 61 + + // Password is invalid. + INVALID_PASSWORD ResultCode = 62 + + // Security credential is invalid. + EXPIRED_PASSWORD ResultCode = 63 + + // Forbidden password (e.g. recently used) + FORBIDDEN_PASSWORD ResultCode = 64 + + // Security credential is invalid. + INVALID_CREDENTIAL ResultCode = 65 + + // Role name is invalid. + INVALID_ROLE ResultCode = 70 + + // Role already exists. + ROLE_ALREADY_EXISTS ResultCode = 71 + + // Privilege is invalid. + INVALID_PRIVILEGE ResultCode = 72 + + // User must be authentication before performing database operations. + NOT_AUTHENTICATED ResultCode = 80 + + // User does not posses the required role to perform the database operation. + ROLE_VIOLATION ResultCode = 81 + + // A user defined function returned an error code. + UDF_BAD_RESPONSE ResultCode = 100 + + // The requested item in a large collection was not found. + LARGE_ITEM_NOT_FOUND ResultCode = 125 + + // Batch functionality has been disabled. + BATCH_DISABLED ResultCode = 150 + + // Batch max requests have been exceeded. + BATCH_MAX_REQUESTS_EXCEEDED ResultCode = 151 + + // All batch queues are full. + BATCH_QUEUES_FULL ResultCode = 152 + + // Invalid GeoJSON on insert/update + GEO_INVALID_GEOJSON ResultCode = 160 + + // Secondary index already exists. + INDEX_FOUND ResultCode = 200 + + // Requested secondary index does not exist. + INDEX_NOTFOUND ResultCode = 201 + + // Secondary index memory space exceeded. + INDEX_OOM ResultCode = 202 + + // Secondary index not available. 
+ INDEX_NOTREADABLE ResultCode = 203 + + // Generic secondary index error. + INDEX_GENERIC ResultCode = 204 + + // Index name maximum length exceeded. + INDEX_NAME_MAXLEN ResultCode = 205 + + // Maximum number of indexes exceeded. + INDEX_MAXCOUNT ResultCode = 206 + + // Secondary index query aborted. + QUERY_ABORTED ResultCode = 210 + + // Secondary index queue full. + QUERY_QUEUEFULL ResultCode = 211 + + // Secondary index query timed out on server. + QUERY_TIMEOUT ResultCode = 212 + + // Generic query error. + QUERY_GENERIC ResultCode = 213 + + // Query NetIO error on server + QUERY_NETIO_ERR ResultCode = 214 + + // Duplicate TaskId sent for the statement + QUERY_DUPLICATE ResultCode = 215 + + // UDF does not exist. + AEROSPIKE_ERR_UDF_NOT_FOUND ResultCode = 1301 + + // LUA file does not exist. + AEROSPIKE_ERR_LUA_FILE_NOT_FOUND ResultCode = 1302 +) + +// Should connection be put back into pool. +func KeepConnection(err error) bool { + // if error is not an AerospikeError, Throw the connection away conservatively + ae, ok := err.(AerospikeError) + if !ok { + return false + } + + switch ae.resultCode { + case 0, // Zero Value + QUERY_TERMINATED, + SCAN_TERMINATED, + PARSE_ERROR, + SERIALIZE_ERROR, + SERVER_NOT_AVAILABLE, + SCAN_ABORT, + QUERY_ABORTED, + + INVALID_NODE_ERROR, + SERVER_MEM_ERROR, + TIMEOUT, + INDEX_OOM, + QUERY_TIMEOUT: + return false + default: + return true + } +} + +// Return result code as a string. +func ResultCodeToString(resultCode ResultCode) string { + switch ResultCode(resultCode) { + case CLUSTER_NAME_MISMATCH_ERROR: + return "Cluster Name does not match the ClientPolicy.ClusterName value" + + case RECORDSET_CLOSED: + return "Recordset has already been closed or cancelled." + + case NO_AVAILABLE_CONNECTIONS_TO_NODE: + return "No available connections to the node. Connection Pool was empty, and limited to certain number of connections." + + case TYPE_NOT_SUPPORTED: + return "Type cannot be converted to Value Type." 
+ + case COMMAND_REJECTED: + return "command rejected" + + case QUERY_TERMINATED: + return "Query terminated" + + case SCAN_TERMINATED: + return "Scan terminated" + + case INVALID_NODE_ERROR: + return "Invalid node" + + case PARSE_ERROR: + return "Parse error" + + case SERIALIZE_ERROR: + return "Serialize error" + + case OK: + return "ok" + + case SERVER_ERROR: + return "Server error" + + case KEY_NOT_FOUND_ERROR: + return "Key not found" + + case GENERATION_ERROR: + return "Generation error" + + case PARAMETER_ERROR: + return "Parameter error" + + case KEY_EXISTS_ERROR: + return "Key already exists" + + case BIN_EXISTS_ERROR: + return "Bin already exists" + + case CLUSTER_KEY_MISMATCH: + return "Cluster key mismatch" + + case SERVER_MEM_ERROR: + return "Server memory error" + + case TIMEOUT: + return "Timeout" + + case NO_XDS: + return "XDS not available" + + case SERVER_NOT_AVAILABLE: + return "Server not available" + + case BIN_TYPE_ERROR: + return "Bin type error" + + case RECORD_TOO_BIG: + return "Record too big" + + case KEY_BUSY: + return "Hot key" + + case SCAN_ABORT: + return "Scan aborted" + + case UNSUPPORTED_FEATURE: + return "Unsupported Server Feature" + + case BIN_NOT_FOUND: + return "Bin not found" + + case DEVICE_OVERLOAD: + return "Device overload" + + case KEY_MISMATCH: + return "Key mismatch" + + case INVALID_NAMESPACE: + return "Namespace not found" + + case BIN_NAME_TOO_LONG: + return "Bin name length greater than 14 characters" + + case FAIL_FORBIDDEN: + return "Operation not allowed at this time" + + case FAIL_ELEMENT_NOT_FOUND: + return "Element not found." 
+ + case FAIL_ELEMENT_EXISTS: + return "Element exists" + + case QUERY_END: + return "Query end" + + case SECURITY_NOT_SUPPORTED: + return "Security not supported" + + case SECURITY_NOT_ENABLED: + return "Security not enabled" + + case SECURITY_SCHEME_NOT_SUPPORTED: + return "Security scheme not supported" + + case INVALID_COMMAND: + return "Invalid command" + + case INVALID_FIELD: + return "Invalid field" + + case ILLEGAL_STATE: + return "Illegal state" + + case INVALID_USER: + return "Invalid user" + + case USER_ALREADY_EXISTS: + return "User already exists" + + case INVALID_PASSWORD: + return "Invalid password" + + case EXPIRED_PASSWORD: + return "Expired password" + + case FORBIDDEN_PASSWORD: + return "Forbidden password" + + case INVALID_CREDENTIAL: + return "Invalid credential" + + case INVALID_ROLE: + return "Invalid role" + + case ROLE_ALREADY_EXISTS: + return "Role already exists" + + case INVALID_PRIVILEGE: + return "Invalid privilege" + + case NOT_AUTHENTICATED: + return "Not authenticated" + + case ROLE_VIOLATION: + return "Role violation" + + case UDF_BAD_RESPONSE: + return "UDF returned error" + + case LARGE_ITEM_NOT_FOUND: + return "Large collection item not found" + + case BATCH_DISABLED: + return "Batch functionality has been disabled" + + case BATCH_MAX_REQUESTS_EXCEEDED: + return "Batch max requests have been exceeded" + + case BATCH_QUEUES_FULL: + return "All batch queues are full" + + case GEO_INVALID_GEOJSON: + return "Invalid GeoJSON on insert/update" + + case INDEX_FOUND: + return "Index already exists" + + case INDEX_NOTFOUND: + return "Index not found" + + case INDEX_OOM: + return "Index out of memory" + + case INDEX_NOTREADABLE: + return "Index not readable" + + case INDEX_GENERIC: + return "Index error" + + case INDEX_NAME_MAXLEN: + return "Index name max length exceeded" + + case INDEX_MAXCOUNT: + return "Index count exceeds max" + + case QUERY_ABORTED: + return "Query aborted" + + case QUERY_QUEUEFULL: + return "Query queue full" + + 
case QUERY_TIMEOUT: + return "Query timeout" + + case QUERY_GENERIC: + return "Query error" + + case QUERY_NETIO_ERR: + return "Query NetIO error on server" + + case QUERY_DUPLICATE: + return "Duplicate TaskId sent for the statement" + + case AEROSPIKE_ERR_UDF_NOT_FOUND: + return "UDF does not exist." + + case AEROSPIKE_ERR_LUA_FILE_NOT_FOUND: + return "LUA package/file does not exist." + + default: + return fmt.Sprintf("Error code (%v) not available yet - please file an issue on github.", resultCode) + } +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/types/types.go b/vendor/github.com/aerospike/aerospike-client-go/types/types.go new file mode 100644 index 00000000000..52021a004df --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/types/types.go @@ -0,0 +1,15 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types diff --git a/vendor/github.com/aerospike/aerospike-client-go/udf.go b/vendor/github.com/aerospike/aerospike-client-go/udf.go new file mode 100644 index 00000000000..366d5a625d4 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/udf.go @@ -0,0 +1,11 @@ +package aerospike + +// UDF carries information about UDFs on the server +type UDF struct { + // Filename of the UDF + Filename string + // Hash digest of the UDF + Hash string + // Language of UDF + Language Language +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/unpacker.go b/vendor/github.com/aerospike/aerospike-client-go/unpacker.go new file mode 100644 index 00000000000..869b81618d4 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/unpacker.go @@ -0,0 +1,390 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "errors" + "fmt" + "reflect" + + . 
"github.com/aerospike/aerospike-client-go/types" + ParticleType "github.com/aerospike/aerospike-client-go/types/particle_type" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +type unpacker struct { + buffer []byte + offset int + length int +} + +func newUnpacker(buffer []byte, offset int, length int) *unpacker { + return &unpacker{ + buffer: buffer, + offset: offset, + length: length, + } +} + +func (upckr *unpacker) UnpackList() ([]interface{}, error) { + if upckr.length <= 0 { + return nil, nil + } + + theType := upckr.buffer[upckr.offset] & 0xff + upckr.offset++ + var count int + + if (theType & 0xf0) == 0x90 { + count = int(theType & 0x0f) + } else if theType == 0xdc { + count = int(Buffer.BytesToUint16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + } else if theType == 0xdd { + count = int(Buffer.BytesToUint32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + } else { + return nil, nil + } + + return upckr.unpackList(count) +} + +func (upckr *unpacker) unpackList(count int) ([]interface{}, error) { + out := make([]interface{}, 0, count) + + for i := 0; i < count; i++ { + obj, err := upckr.unpackObject(false) + if err != nil { + return nil, err + } + out = append(out, obj) + } + return out, nil +} + +func (upckr *unpacker) UnpackMap() (interface{}, error) { + if upckr.length <= 0 { + return nil, nil + } + + theType := upckr.buffer[upckr.offset] & 0xff + upckr.offset++ + var count int + + if (theType & 0xf0) == 0x80 { + count = int(theType & 0x0f) + } else if theType == 0xde { + count = int(Buffer.BytesToUint16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + } else if theType == 0xdf { + count = int(Buffer.BytesToUint32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + } else { + return make(map[interface{}]interface{}), nil + } + return upckr.unpackMap(count) +} + +func (upckr *unpacker) unpackMap(count int) (interface{}, error) { + if count <= 0 { + return make(map[interface{}]interface{}), nil + } + + if upckr.isMapCDT() { + 
return upckr.unpackCDTMap(count) + } + return upckr.unpackMapNormal(count) +} + +func (upckr *unpacker) unpackMapNormal(count int) (map[interface{}]interface{}, error) { + out := make(map[interface{}]interface{}, count) + + for i := 0; i < count; i++ { + key, err := upckr.unpackObject(true) + if err != nil { + return nil, err + } + + val, err := upckr.unpackObject(false) + if err != nil { + return nil, err + } + out[key] = val + } + return out, nil +} + +func (upckr *unpacker) unpackCDTMap(count int) ([]MapPair, error) { + out := make([]MapPair, 0, count-1) + + for i := 0; i < count; i++ { + key, err := upckr.unpackObject(true) + if err != nil && err != skipHeaderErr { + return nil, err + } + + val, err := upckr.unpackObject(false) + if err != nil && err != skipHeaderErr { + return nil, err + } + + if key != nil { + out = append(out, MapPair{Key: key, Value: val}) + } + } + + return out, nil +} + +func (upckr *unpacker) isMapCDT() bool { + // make sure the buffer is long enough (for empty maps), and map type is ordered map + if upckr.offset >= len(upckr.buffer) || upckr.buffer[upckr.offset]&0xff != 0xc7 { + return false + } + + extensionType := upckr.buffer[upckr.offset+1] & 0xff + + if extensionType == 0 { + mapBits := upckr.buffer[upckr.offset+2] & 0xff + + // Extension is a map type. Determine which one. + if (mapBits & (0x04 | 0x08)) != 0 { + // Index/rank range result where order needs to be preserved. 
+ return true + } else if (mapBits & 0x01) != 0 { + // Sorted map + return true + } + } + + return false +} + +func (upckr *unpacker) unpackObjects() (interface{}, error) { + if upckr.length <= 0 { + return nil, nil + } + + return upckr.unpackObject(false) +} + +func (upckr *unpacker) unpackBlob(count int, isMapKey bool) (interface{}, error) { + theType := upckr.buffer[upckr.offset] & 0xff + upckr.offset++ + count-- + var val interface{} + + switch theType { + case ParticleType.STRING: + val = string(upckr.buffer[upckr.offset : upckr.offset+count]) + + case ParticleType.BLOB: + if isMapKey { + b := reflect.Indirect(reflect.New(reflect.ArrayOf(count, reflect.TypeOf(byte(0))))) + reflect.Copy(b, reflect.ValueOf(upckr.buffer[upckr.offset:upckr.offset+count])) + + val = b.Interface() + } else { + b := make([]byte, count) + copy(b, upckr.buffer[upckr.offset:upckr.offset+count]) + val = b + } + + case ParticleType.GEOJSON: + val = NewGeoJSONValue(string(upckr.buffer[upckr.offset : upckr.offset+count])) + + default: + panic(NewAerospikeError(SERIALIZE_ERROR, fmt.Sprintf("Error while unpacking BLOB. 
Type-header with code `%d` not recognized.", theType))) + } + upckr.offset += count + + return val, nil +} + +var skipHeaderErr = errors.New("Skip the unpacker error") + +func (upckr *unpacker) unpackObject(isMapKey bool) (interface{}, error) { + theType := upckr.buffer[upckr.offset] & 0xff + upckr.offset++ + + switch theType { + case 0xc0: + return nil, nil + + case 0xc3: + return true, nil + + case 0xc2: + return false, nil + + case 0xca: + val := Buffer.BytesToFloat32(upckr.buffer, upckr.offset) + upckr.offset += 4 + return val, nil + + case 0xcb: + val := Buffer.BytesToFloat64(upckr.buffer, upckr.offset) + upckr.offset += 8 + return val, nil + + case 0xcc: + r := upckr.buffer[upckr.offset] & 0xff + upckr.offset++ + + return int(r), nil + + case 0xcd: + val := uint16(Buffer.BytesToInt16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + return int(val), nil + + case 0xce: + val := uint32(Buffer.BytesToInt32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + + if Buffer.Arch64Bits { + return int(val), nil + } + return int64(val), nil + + case 0xcf: + val := Buffer.BytesToInt64(upckr.buffer, upckr.offset) + upckr.offset += 8 + return uint64(val), nil + + case 0xd0: + r := int8(upckr.buffer[upckr.offset]) + upckr.offset++ + return int(r), nil + + case 0xd1: + val := Buffer.BytesToInt16(upckr.buffer, upckr.offset) + upckr.offset += 2 + return int(val), nil + + case 0xd2: + val := Buffer.BytesToInt32(upckr.buffer, upckr.offset) + upckr.offset += 4 + return int(val), nil + + case 0xd3: + val := Buffer.BytesToInt64(upckr.buffer, upckr.offset) + upckr.offset += 8 + if Buffer.Arch64Bits { + return int(val), nil + } + return int64(val), nil + + case 0xc4, 0xd9: + count := int(upckr.buffer[upckr.offset] & 0xff) + upckr.offset++ + return upckr.unpackBlob(count, isMapKey) + + case 0xc5, 0xda: + count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + return upckr.unpackBlob(count, isMapKey) + + case 0xc6, 0xdb: + count := 
int(Buffer.BytesToUint32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + return upckr.unpackBlob(count, isMapKey) + + case 0xdc: + count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + return upckr.unpackList(count) + + case 0xdd: + count := int(Buffer.BytesToUint32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + return upckr.unpackList(count) + + case 0xde: + count := int(Buffer.BytesToUint16(upckr.buffer, upckr.offset)) + upckr.offset += 2 + return upckr.unpackMap(count) + + case 0xdf: + count := int(Buffer.BytesToUint32(upckr.buffer, upckr.offset)) + upckr.offset += 4 + return upckr.unpackMap(count) + + case 0xd4: + // Skip over type extension with 1 byte + upckr.offset += 1 + 1 + return nil, skipHeaderErr + + case 0xd5: + // Skip over type extension with 2 bytes + upckr.offset += 1 + 2 + return nil, skipHeaderErr + + case 0xd6: + // Skip over type extension with 4 bytes + upckr.offset += 1 + 4 + return nil, skipHeaderErr + + case 0xd7: + // Skip over type extension with 8 bytes + upckr.offset += 1 + 8 + return nil, skipHeaderErr + + case 0xd8: + // Skip over type extension with 16 bytes + upckr.offset += 1 + 16 + return nil, skipHeaderErr + + case 0xc7: // Skip over type extension with 8 bit header and bytes + count := int(upckr.buffer[upckr.offset] & 0xff) + upckr.offset += count + 1 + 1 + return nil, skipHeaderErr + + case 0xc8: // Skip over type extension with 16 bit header and bytes + count := int(Buffer.BytesToInt16(upckr.buffer, upckr.offset)) + upckr.offset += count + 1 + 2 + return nil, skipHeaderErr + + case 0xc9: // Skip over type extension with 32 bit header and bytes + count := int(Buffer.BytesToInt32(upckr.buffer, upckr.offset)) + upckr.offset += count + 1 + 4 + return nil, skipHeaderErr + + default: + if (theType & 0xe0) == 0xa0 { + return upckr.unpackBlob(int(theType&0x1f), isMapKey) + } + + if (theType & 0xf0) == 0x80 { + return upckr.unpackMap(int(theType & 0x0f)) + } + + if (theType & 0xf0) == 0x90 { + 
count := int(theType & 0x0f) + return upckr.unpackList(count) + } + + if theType < 0x80 { + return int(theType), nil + } + + if theType >= 0xe0 { + return int(int(theType) - 0xe0 - 32), nil + } + } + + return nil, NewAerospikeError(SERIALIZE_ERROR) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/user_roles.go b/vendor/github.com/aerospike/aerospike-client-go/user_roles.go new file mode 100644 index 00000000000..99ea537f2b9 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/user_roles.go @@ -0,0 +1,25 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Portions may be licensed to Aerospike, Inc. under one or more contributor +// license agreements. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +package aerospike + +// UserRoles contains information about a user. +type UserRoles struct { + // User name. + User string + + // Roles is a list of assigned roles. + Roles []string +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/utils/buffer/buffer.go b/vendor/github.com/aerospike/aerospike-client-go/utils/buffer/buffer.go new file mode 100644 index 00000000000..e1c84c45cc2 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/utils/buffer/buffer.go @@ -0,0 +1,136 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buffer + +import ( + "encoding/binary" + "fmt" + "math" +) + +const ( + SizeOfInt32 = uintptr(4) + SizeOfInt64 = uintptr(8) + + uint64sz = int(8) + uint32sz = int(4) + uint16sz = int(2) + + float32sz = int(4) + float64sz = int(8) +) + +var SizeOfInt uintptr + +var Arch64Bits bool +var Arch32Bits bool + +func init() { + if 0 == ^uint(0xffffffff) { + SizeOfInt = 4 + } else { + SizeOfInt = 8 + } + Arch64Bits = (SizeOfInt == SizeOfInt64) + Arch32Bits = (SizeOfInt == SizeOfInt32) +} + +// BytesToHexString converts a byte slice into a hex string +func BytesToHexString(buf []byte) string { + hlist := make([]byte, 3*len(buf)) + + for i := range buf { + hex := fmt.Sprintf("%02x ", buf[i]) + idx := i * 3 + copy(hlist[idx:], hex) + } + return string(hlist) +} + +// LittleBytesToInt32 converts a slice into int32; only maximum of 4 bytes will be used +func LittleBytesToInt32(buf []byte, offset int) int32 { + l := len(buf[offset:]) + if l > uint32sz { + l = uint32sz + } + r := int32(binary.LittleEndian.Uint32(buf[offset : offset+l])) + return r +} + +// BytesToInt64 converts a slice into int64; only maximum of 8 bytes will be used +func BytesToInt64(buf []byte, offset int) int64 { + l := len(buf[offset:]) + if l > uint64sz { + l = uint64sz + } + r := int64(binary.BigEndian.Uint64(buf[offset : offset+l])) + return r +} + +func VarBytesToInt64(buf []byte, offset int, len int) int64 { + if len == 8 { + return BytesToInt64(buf, offset) + } else if len == 4 { + return int64(BytesToInt32(buf, offset)) + } else if len == 2 { + return int64(BytesToInt16(buf, 
offset)) + } + + val := int64(0) + for i := 0; i < len; i++ { + val <<= 8 + val |= int64(buf[offset+i] & 0xFF) + } + return val +} + +// BytesToInt32 converts a slice into int32; only maximum of 4 bytes will be used +func BytesToInt32(buf []byte, offset int) int32 { + return int32(binary.BigEndian.Uint32(buf[offset : offset+uint32sz])) +} + +// BytesToUint32 converts a slice into uint32; only maximum of 4 bytes will be used +func BytesToUint32(buf []byte, offset int) uint32 { + return binary.BigEndian.Uint32(buf[offset : offset+uint32sz]) +} + +// BytesToInt16 converts a slice of bytes to an int16 +func BytesToInt16(buf []byte, offset int) int16 { + return int16(binary.BigEndian.Uint16(buf[offset : offset+uint16sz])) +} + +func BytesToUint16(buf []byte, offset int) uint16 { + return binary.BigEndian.Uint16(buf[offset : offset+uint16sz]) +} + +func BytesToFloat32(buf []byte, offset int) float32 { + bits := binary.BigEndian.Uint32(buf[offset : offset+float32sz]) + return math.Float32frombits(bits) +} + +func BytesToFloat64(buf []byte, offset int) float64 { + bits := binary.BigEndian.Uint64(buf[offset : offset+float64sz]) + return math.Float64frombits(bits) +} + +func GetUnsigned(b byte) int { + r := b + + if r < 0 { + r = r & 0x7f + r = r | 0x80 + } + return int(r) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/value.go b/vendor/github.com/aerospike/aerospike-client-go/value.go new file mode 100644 index 00000000000..2ee2955e3e3 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/value.go @@ -0,0 +1,1053 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "fmt" + "reflect" + "strconv" + + . "github.com/aerospike/aerospike-client-go/types" + ParticleType "github.com/aerospike/aerospike-client-go/types/particle_type" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// this function will be set in value_slow file if included +var newValueReflect func(interface{}) Value + +// Map pair is used when the client returns sorted maps from the server +// Since the default map in Go is a hash map, we will use a slice +// to return the results in server order +type MapPair struct{ Key, Value interface{} } + +// Value interface is used to efficiently serialize objects into the wire protocol. +type Value interface { + + // Calculate number of vl.bytes necessary to serialize the value in the wire protocol. + estimateSize() (int, error) + + // Serialize the value in the wire protocol. + write(cmd BufferEx) (int, error) + + // Serialize the value using MessagePack. + pack(cmd BufferEx) (int, error) + + // GetType returns wire protocol value type. + GetType() int + + // GetObject returns original value as an interface{}. + GetObject() interface{} + + // String implements Stringer interface. + String() string +} + +type AerospikeBlob interface { + // EncodeBlob returns a byte slice representing the encoding of the + // receiver for transmission to a Decoder, usually of the same + // concrete type. + EncodeBlob() ([]byte, error) +} + +// tryConcreteValue will return an aerospike value. +// If the encoder does not exists, it will not try to use reflection. 
+func tryConcreteValue(v interface{}) Value { + switch val := v.(type) { + case Value: + return val + case int: + return IntegerValue(val) + case int64: + return LongValue(val) + case string: + return StringValue(val) + case []interface{}: + return ListValue(val) + case map[string]interface{}: + return JsonValue(val) + case map[interface{}]interface{}: + return NewMapValue(val) + case nil: + return nullValue + case []Value: + return NewValueArray(val) + case []byte: + return BytesValue(val) + case int8: + return IntegerValue(int(val)) + case int16: + return IntegerValue(int(val)) + case int32: + return IntegerValue(int(val)) + case uint8: // byte supported here + return IntegerValue(int(val)) + case uint16: + return IntegerValue(int(val)) + case uint32: + return IntegerValue(int(val)) + case float32: + return FloatValue(float64(val)) + case float64: + return FloatValue(val) + case uint: + // if it doesn't overflow int64, it is OK + if int64(val) >= 0 { + return LongValue(int64(val)) + } + case MapIter: + return NewMapperValue(val) + case ListIter: + return NewListerValue(val) + case AerospikeBlob: + return NewBlobValue(val) + + /* + The following cases will try to avoid using reflection by matching against the + internal generic types. + If you have custom type aliases in your code, you can use the same aerospike types to cast your type into, + to avoid hitting the generics. 
+ */ + case []string: + return NewListerValue(stringSlice(val)) + case []int: + return NewListerValue(intSlice(val)) + case []int8: + return NewListerValue(int8Slice(val)) + case []int16: + return NewListerValue(int16Slice(val)) + case []int32: + return NewListerValue(int32Slice(val)) + case []int64: + return NewListerValue(int64Slice(val)) + case []uint16: + return NewListerValue(uint16Slice(val)) + case []uint32: + return NewListerValue(uint32Slice(val)) + case []uint64: + return NewListerValue(uint64Slice(val)) + case []float32: + return NewListerValue(float32Slice(val)) + case []float64: + return NewListerValue(float64Slice(val)) + case map[string]string: + return NewMapperValue(stringStringMap(val)) + case map[string]int: + return NewMapperValue(stringIntMap(val)) + case map[string]int8: + return NewMapperValue(stringInt8Map(val)) + case map[string]int16: + return NewMapperValue(stringInt16Map(val)) + case map[string]int32: + return NewMapperValue(stringInt32Map(val)) + case map[string]int64: + return NewMapperValue(stringInt64Map(val)) + case map[string]uint16: + return NewMapperValue(stringUint16Map(val)) + case map[string]uint32: + return NewMapperValue(stringUint32Map(val)) + case map[string]float32: + return NewMapperValue(stringFloat32Map(val)) + case map[string]float64: + return NewMapperValue(stringFloat64Map(val)) + case map[int]string: + return NewMapperValue(intStringMap(val)) + case map[int]int: + return NewMapperValue(intIntMap(val)) + case map[int]int8: + return NewMapperValue(intInt8Map(val)) + case map[int]int16: + return NewMapperValue(intInt16Map(val)) + case map[int]int32: + return NewMapperValue(intInt32Map(val)) + case map[int]int64: + return NewMapperValue(intInt64Map(val)) + case map[int]uint16: + return NewMapperValue(intUint16Map(val)) + case map[int]uint32: + return NewMapperValue(intUint32Map(val)) + case map[int]float32: + return NewMapperValue(intFloat32Map(val)) + case map[int]float64: + return NewMapperValue(intFloat64Map(val)) + 
case map[int]interface{}: + return NewMapperValue(intInterfaceMap(val)) + case map[int8]string: + return NewMapperValue(int8StringMap(val)) + case map[int8]int: + return NewMapperValue(int8IntMap(val)) + case map[int8]int8: + return NewMapperValue(int8Int8Map(val)) + case map[int8]int16: + return NewMapperValue(int8Int16Map(val)) + case map[int8]int32: + return NewMapperValue(int8Int32Map(val)) + case map[int8]int64: + return NewMapperValue(int8Int64Map(val)) + case map[int8]uint16: + return NewMapperValue(int8Uint16Map(val)) + case map[int8]uint32: + return NewMapperValue(int8Uint32Map(val)) + case map[int8]float32: + return NewMapperValue(int8Float32Map(val)) + case map[int8]float64: + return NewMapperValue(int8Float64Map(val)) + case map[int8]interface{}: + return NewMapperValue(int8InterfaceMap(val)) + case map[int16]string: + return NewMapperValue(int16StringMap(val)) + case map[int16]int: + return NewMapperValue(int16IntMap(val)) + case map[int16]int8: + return NewMapperValue(int16Int8Map(val)) + case map[int16]int16: + return NewMapperValue(int16Int16Map(val)) + case map[int16]int32: + return NewMapperValue(int16Int32Map(val)) + case map[int16]int64: + return NewMapperValue(int16Int64Map(val)) + case map[int16]uint16: + return NewMapperValue(int16Uint16Map(val)) + case map[int16]uint32: + return NewMapperValue(int16Uint32Map(val)) + case map[int16]float32: + return NewMapperValue(int16Float32Map(val)) + case map[int16]float64: + return NewMapperValue(int16Float64Map(val)) + case map[int16]interface{}: + return NewMapperValue(int16InterfaceMap(val)) + case map[int32]string: + return NewMapperValue(int32StringMap(val)) + case map[int32]int: + return NewMapperValue(int32IntMap(val)) + case map[int32]int8: + return NewMapperValue(int32Int8Map(val)) + case map[int32]int16: + return NewMapperValue(int32Int16Map(val)) + case map[int32]int32: + return NewMapperValue(int32Int32Map(val)) + case map[int32]int64: + return NewMapperValue(int32Int64Map(val)) + case 
map[int32]uint16: + return NewMapperValue(int32Uint16Map(val)) + case map[int32]uint32: + return NewMapperValue(int32Uint32Map(val)) + case map[int32]float32: + return NewMapperValue(int32Float32Map(val)) + case map[int32]float64: + return NewMapperValue(int32Float64Map(val)) + case map[int32]interface{}: + return NewMapperValue(int32InterfaceMap(val)) + case map[int64]string: + return NewMapperValue(int64StringMap(val)) + case map[int64]int: + return NewMapperValue(int64IntMap(val)) + case map[int64]int8: + return NewMapperValue(int64Int8Map(val)) + case map[int64]int16: + return NewMapperValue(int64Int16Map(val)) + case map[int64]int32: + return NewMapperValue(int64Int32Map(val)) + case map[int64]int64: + return NewMapperValue(int64Int64Map(val)) + case map[int64]uint16: + return NewMapperValue(int64Uint16Map(val)) + case map[int64]uint32: + return NewMapperValue(int64Uint32Map(val)) + case map[int64]float32: + return NewMapperValue(int64Float32Map(val)) + case map[int64]float64: + return NewMapperValue(int64Float64Map(val)) + case map[int64]interface{}: + return NewMapperValue(int64InterfaceMap(val)) + case map[uint16]string: + return NewMapperValue(uint16StringMap(val)) + case map[uint16]int: + return NewMapperValue(uint16IntMap(val)) + case map[uint16]int8: + return NewMapperValue(uint16Int8Map(val)) + case map[uint16]int16: + return NewMapperValue(uint16Int16Map(val)) + case map[uint16]int32: + return NewMapperValue(uint16Int32Map(val)) + case map[uint16]int64: + return NewMapperValue(uint16Int64Map(val)) + case map[uint16]uint16: + return NewMapperValue(uint16Uint16Map(val)) + case map[uint16]uint32: + return NewMapperValue(uint16Uint32Map(val)) + case map[uint16]float32: + return NewMapperValue(uint16Float32Map(val)) + case map[uint16]float64: + return NewMapperValue(uint16Float64Map(val)) + case map[uint16]interface{}: + return NewMapperValue(uint16InterfaceMap(val)) + case map[uint32]string: + return NewMapperValue(uint32StringMap(val)) + case 
map[uint32]int: + return NewMapperValue(uint32IntMap(val)) + case map[uint32]int8: + return NewMapperValue(uint32Int8Map(val)) + case map[uint32]int16: + return NewMapperValue(uint32Int16Map(val)) + case map[uint32]int32: + return NewMapperValue(uint32Int32Map(val)) + case map[uint32]int64: + return NewMapperValue(uint32Int64Map(val)) + case map[uint32]uint16: + return NewMapperValue(uint32Uint16Map(val)) + case map[uint32]uint32: + return NewMapperValue(uint32Uint32Map(val)) + case map[uint32]float32: + return NewMapperValue(uint32Float32Map(val)) + case map[uint32]float64: + return NewMapperValue(uint32Float64Map(val)) + case map[uint32]interface{}: + return NewMapperValue(uint32InterfaceMap(val)) + case map[float32]string: + return NewMapperValue(float32StringMap(val)) + case map[float32]int: + return NewMapperValue(float32IntMap(val)) + case map[float32]int8: + return NewMapperValue(float32Int8Map(val)) + case map[float32]int16: + return NewMapperValue(float32Int16Map(val)) + case map[float32]int32: + return NewMapperValue(float32Int32Map(val)) + case map[float32]int64: + return NewMapperValue(float32Int64Map(val)) + case map[float32]uint16: + return NewMapperValue(float32Uint16Map(val)) + case map[float32]uint32: + return NewMapperValue(float32Uint32Map(val)) + case map[float32]float32: + return NewMapperValue(float32Float32Map(val)) + case map[float32]float64: + return NewMapperValue(float32Float64Map(val)) + case map[float32]interface{}: + return NewMapperValue(float32InterfaceMap(val)) + case map[float64]string: + return NewMapperValue(float64StringMap(val)) + case map[float64]int: + return NewMapperValue(float64IntMap(val)) + case map[float64]int8: + return NewMapperValue(float64Int8Map(val)) + case map[float64]int16: + return NewMapperValue(float64Int16Map(val)) + case map[float64]int32: + return NewMapperValue(float64Int32Map(val)) + case map[float64]int64: + return NewMapperValue(float64Int64Map(val)) + case map[float64]uint16: + return 
NewMapperValue(float64Uint16Map(val)) + case map[float64]uint32: + return NewMapperValue(float64Uint32Map(val)) + case map[float64]float32: + return NewMapperValue(float64Float32Map(val)) + case map[float64]float64: + return NewMapperValue(float64Float64Map(val)) + case map[float64]interface{}: + return NewMapperValue(float64InterfaceMap(val)) + case map[string]uint64: + return NewMapperValue(stringUint64Map(val)) + case map[int]uint64: + return NewMapperValue(intUint64Map(val)) + case map[int8]uint64: + return NewMapperValue(int8Uint64Map(val)) + case map[int16]uint64: + return NewMapperValue(int16Uint64Map(val)) + case map[int32]uint64: + return NewMapperValue(int32Uint64Map(val)) + case map[int64]uint64: + return NewMapperValue(int64Uint64Map(val)) + case map[uint16]uint64: + return NewMapperValue(uint16Uint64Map(val)) + case map[uint32]uint64: + return NewMapperValue(uint32Uint64Map(val)) + case map[float32]uint64: + return NewMapperValue(float32Uint64Map(val)) + case map[float64]uint64: + return NewMapperValue(float64Uint64Map(val)) + case map[uint64]string: + return NewMapperValue(uint64StringMap(val)) + case map[uint64]int: + return NewMapperValue(uint64IntMap(val)) + case map[uint64]int8: + return NewMapperValue(uint64Int8Map(val)) + case map[uint64]int16: + return NewMapperValue(uint64Int16Map(val)) + case map[uint64]int32: + return NewMapperValue(uint64Int32Map(val)) + case map[uint64]int64: + return NewMapperValue(uint64Int64Map(val)) + case map[uint64]uint16: + return NewMapperValue(uint64Uint16Map(val)) + case map[uint64]uint32: + return NewMapperValue(uint64Uint32Map(val)) + case map[uint64]uint64: + return NewMapperValue(uint64Uint64Map(val)) + case map[uint64]float32: + return NewMapperValue(uint64Float32Map(val)) + case map[uint64]float64: + return NewMapperValue(uint64Float64Map(val)) + case map[uint64]interface{}: + return NewMapperValue(uint64InterfaceMap(val)) + } + + return nil +} + +// NewValue generates a new Value object based on the type. 
+// If the type is not supported, NewValue will panic. +// This method is a convenience method, and should not be used +// when absolute performance is required unless for the reason mentioned below. +// +// If you have custom maps or slices like: +// type MyMap map[primitive1]primitive2, eg: map[int]string +// or +// type MySlice []primitive, eg: []float64 +// cast them to their primitive type when passing them to this method: +// v := NewValue(map[int]string(myVar)) +// v := NewValue([]float64(myVar)) +// This way you will avoid hitting reflection. +// To completely avoid reflection in the library, +// use the build tag: as_performance while building your program. +func NewValue(v interface{}) Value { + if value := tryConcreteValue(v); value != nil { + return value + } + + if newValueReflect != nil { + if res := newValueReflect(v); res != nil { + return res + } + } + + // panic for anything that is not supported. + panic(NewAerospikeError(TYPE_NOT_SUPPORTED, fmt.Sprintf("Value type '%v' (%s) not supported (if you are compiling via 'as_performance' tag, use cast either to primitives, or use ListIter or MapIter interfaces.)", v, reflect.TypeOf(v).String()))) +} + +// NullValue is an empty value. +type NullValue struct{} + +var nullValue NullValue + +// NewNullValue generates a NullValue instance. +func NewNullValue() NullValue { + return nullValue +} + +func (vl NullValue) estimateSize() (int, error) { + return 0, nil +} + +func (vl NullValue) write(cmd BufferEx) (int, error) { + return 0, nil +} + +func (vl NullValue) pack(cmd BufferEx) (int, error) { + return __PackNil(cmd) +} + +// GetType returns wire protocol value type. +func (vl NullValue) GetType() int { + return ParticleType.NULL +} + +// GetObject returns original value as an interface{}. 
+func (vl NullValue) GetObject() interface{} { + return nil +} + +func (vl NullValue) String() string { + return "" +} + +/////////////////////////////////////////////////////////////////////////////// + +// BytesValue encapsulates an array of bytes. +type BytesValue []byte + +// NewBytesValue generates a ByteValue instance. +func NewBytesValue(bytes []byte) BytesValue { + return BytesValue(bytes) +} + +// NewBlobValue accepts an AerospikeBlob interface, and automatically +// converts it to a BytesValue. +// If Encode returns an err, it will panic. +func NewBlobValue(object AerospikeBlob) BytesValue { + buf, err := object.EncodeBlob() + if err != nil { + panic(err) + } + + return NewBytesValue(buf) +} + +func (vl BytesValue) estimateSize() (int, error) { + return len(vl), nil +} + +func (vl BytesValue) write(cmd BufferEx) (int, error) { + return cmd.Write(vl) +} + +func (vl BytesValue) pack(cmd BufferEx) (int, error) { + return __PackBytes(cmd, vl) +} + +// GetType returns wire protocol value type. +func (vl BytesValue) GetType() int { + return ParticleType.BLOB +} + +// GetObject returns original value as an interface{}. +func (vl BytesValue) GetObject() interface{} { + return []byte(vl) +} + +// String implements Stringer interface. +func (vl BytesValue) String() string { + return Buffer.BytesToHexString(vl) +} + +/////////////////////////////////////////////////////////////////////////////// + +// StringValue encapsulates a string value. +type StringValue string + +// NewStringValue generates a StringValue instance. +func NewStringValue(value string) StringValue { + return StringValue(value) +} + +func (vl StringValue) estimateSize() (int, error) { + return len(vl), nil +} + +func (vl StringValue) write(cmd BufferEx) (int, error) { + return cmd.WriteString(string(vl)) +} + +func (vl StringValue) pack(cmd BufferEx) (int, error) { + return __PackString(cmd, string(vl)) +} + +// GetType returns wire protocol value type. 
+func (vl StringValue) GetType() int { + return ParticleType.STRING +} + +// GetObject returns original value as an interface{}. +func (vl StringValue) GetObject() interface{} { + return string(vl) +} + +// String implements Stringer interface. +func (vl StringValue) String() string { + return string(vl) +} + +/////////////////////////////////////////////////////////////////////////////// + +// IntegerValue encapsulates an integer value. +type IntegerValue int + +// NewIntegerValue generates an IntegerValue instance. +func NewIntegerValue(value int) IntegerValue { + return IntegerValue(value) +} + +func (vl IntegerValue) estimateSize() (int, error) { + return 8, nil +} + +func (vl IntegerValue) write(cmd BufferEx) (int, error) { + return cmd.WriteInt64(int64(vl)) +} + +func (vl IntegerValue) pack(cmd BufferEx) (int, error) { + return __PackAInt64(cmd, int64(vl)) +} + +// GetType returns wire protocol value type. +func (vl IntegerValue) GetType() int { + return ParticleType.INTEGER +} + +// GetObject returns original value as an interface{}. +func (vl IntegerValue) GetObject() interface{} { + return int(vl) +} + +// String implements Stringer interface. +func (vl IntegerValue) String() string { + return strconv.Itoa(int(vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// LongValue encapsulates an int64 value. +type LongValue int64 + +// NewLongValue generates a LongValue instance. +func NewLongValue(value int64) LongValue { + return LongValue(value) +} + +func (vl LongValue) estimateSize() (int, error) { + return 8, nil +} + +func (vl LongValue) write(cmd BufferEx) (int, error) { + return cmd.WriteInt64(int64(vl)) +} + +func (vl LongValue) pack(cmd BufferEx) (int, error) { + return __PackAInt64(cmd, int64(vl)) +} + +// GetType returns wire protocol value type. +func (vl LongValue) GetType() int { + return ParticleType.INTEGER +} + +// GetObject returns original value as an interface{}. 
+func (vl LongValue) GetObject() interface{} { + return int64(vl) +} + +// String implements Stringer interface. +func (vl LongValue) String() string { + return strconv.Itoa(int(vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// FloatValue encapsulates an float64 value. +type FloatValue float64 + +// NewFloatValue generates a FloatValue instance. +func NewFloatValue(value float64) FloatValue { + return FloatValue(value) +} + +func (vl FloatValue) estimateSize() (int, error) { + return 8, nil +} + +func (vl FloatValue) write(cmd BufferEx) (int, error) { + return cmd.WriteFloat64(float64(vl)) +} + +func (vl FloatValue) pack(cmd BufferEx) (int, error) { + return __PackFloat64(cmd, float64(vl)) +} + +// GetType returns wire protocol value type. +func (vl FloatValue) GetType() int { + return ParticleType.FLOAT +} + +// GetObject returns original value as an interface{}. +func (vl FloatValue) GetObject() interface{} { + return float64(vl) +} + +// String implements Stringer interface. +func (vl FloatValue) String() string { + return (fmt.Sprintf("%f", vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// ValueArray encapsulates an array of Value. +// Supported by Aerospike 3 servers only. +type ValueArray []Value + +// ToValueSlice converts a []interface{} to []Value. +// It will panic if any of array element types are not supported. +func ToValueSlice(array []interface{}) []Value { + // TODO: Do something about this method + res := make([]Value, 0, len(array)) + for i := range array { + res = append(res, NewValue(array[i])) + } + return res +} + +// ToValueArray converts a []interface{} to a ValueArray type. +// It will panic if any of array element types are not supported. +func ToValueArray(array []interface{}) *ValueArray { + return NewValueArray(ToValueSlice(array)) +} + +// NewValueArray generates a ValueArray instance. 
+func NewValueArray(array []Value) *ValueArray { + // return &ValueArray{*NewListerValue(valueList(array))} + res := ValueArray(array) + return &res +} + +func (va ValueArray) estimateSize() (int, error) { + return __PackValueArray(nil, va) +} + +func (va ValueArray) write(cmd BufferEx) (int, error) { + return __PackValueArray(cmd, va) +} + +func (va ValueArray) pack(cmd BufferEx) (int, error) { + return __PackValueArray(cmd, []Value(va)) +} + +// GetType returns wire protocol value type. +func (va ValueArray) GetType() int { + return ParticleType.LIST +} + +// GetObject returns original value as an interface{}. +func (va ValueArray) GetObject() interface{} { + return va +} + +// String implements Stringer interface. +func (va ValueArray) String() string { + return fmt.Sprintf("%v", []Value(va)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// ListValue encapsulates any arbitrary array. +// Supported by Aerospike 3 servers only. +type ListValue []interface{} + +// NewListValue generates a ListValue instance. +func NewListValue(list []interface{}) ListValue { + return ListValue(list) +} + +func (vl ListValue) estimateSize() (int, error) { + return __PackIfcList(nil, vl) +} + +func (vl ListValue) write(cmd BufferEx) (int, error) { + return __PackIfcList(cmd, vl) +} + +func (vl ListValue) pack(cmd BufferEx) (int, error) { + return __PackIfcList(cmd, []interface{}(vl)) +} + +// GetType returns wire protocol value type. +func (vl ListValue) GetType() int { + return ParticleType.LIST +} + +// GetObject returns original value as an interface{}. +func (vl ListValue) GetObject() interface{} { + return vl +} + +// String implements Stringer interface. +func (vl ListValue) String() string { + return fmt.Sprintf("%v", []interface{}(vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// ListValue encapsulates any arbitrary array. +// Supported by Aerospike 3 servers only. 
+type ListerValue struct { + list ListIter +} + +// NewListValue generates a ListValue instance. +func NewListerValue(list ListIter) *ListerValue { + res := &ListerValue{ + list: list, + } + + return res +} + +func (vl *ListerValue) estimateSize() (int, error) { + return __PackList(nil, vl.list) +} + +func (vl *ListerValue) write(cmd BufferEx) (int, error) { + return __PackList(cmd, vl.list) +} + +func (vl *ListerValue) pack(cmd BufferEx) (int, error) { + return __PackList(cmd, vl.list) +} + +// GetType returns wire protocol value type. +func (vl *ListerValue) GetType() int { + return ParticleType.LIST +} + +// GetObject returns original value as an interface{}. +func (vl *ListerValue) GetObject() interface{} { + return vl.list +} + +// String implements Stringer interface. +func (vl *ListerValue) String() string { + return fmt.Sprintf("%v", vl.list) +} + +/////////////////////////////////////////////////////////////////////////////// + +// MapValue encapsulates an arbitrary map. +// Supported by Aerospike 3 servers only. +type MapValue map[interface{}]interface{} + +// NewMapValue generates a MapValue instance. +func NewMapValue(vmap map[interface{}]interface{}) MapValue { + return MapValue(vmap) +} + +func (vl MapValue) estimateSize() (int, error) { + return __PackIfcMap(nil, vl) +} + +func (vl MapValue) write(cmd BufferEx) (int, error) { + return __PackIfcMap(cmd, vl) +} + +func (vl MapValue) pack(cmd BufferEx) (int, error) { + return __PackIfcMap(cmd, vl) +} + +// GetType returns wire protocol value type. +func (vl MapValue) GetType() int { + return ParticleType.MAP +} + +// GetObject returns original value as an interface{}. +func (vl MapValue) GetObject() interface{} { + return vl +} + +func (vl MapValue) String() string { + return fmt.Sprintf("%v", map[interface{}]interface{}(vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// JsonValue encapsulates a Json map. +// Supported by Aerospike 3 servers only. 
+type JsonValue map[string]interface{} + +// NewMapValue generates a JsonValue instance. +func NewJsonValue(vmap map[string]interface{}) JsonValue { + return JsonValue(vmap) +} + +func (vl JsonValue) estimateSize() (int, error) { + return __PackJsonMap(nil, vl) +} + +func (vl JsonValue) write(cmd BufferEx) (int, error) { + return __PackJsonMap(cmd, vl) +} + +func (vl JsonValue) pack(cmd BufferEx) (int, error) { + return __PackJsonMap(cmd, vl) +} + +// GetType returns wire protocol value type. +func (vl JsonValue) GetType() int { + return ParticleType.MAP +} + +// GetObject returns original value as an interface{}. +func (vl JsonValue) GetObject() interface{} { + return vl +} + +func (vl JsonValue) String() string { + return fmt.Sprintf("%v", map[string]interface{}(vl)) +} + +/////////////////////////////////////////////////////////////////////////////// + +// MapperValue encapsulates an arbitrary map which implements a MapIter interface. +// Supported by Aerospike 3 servers only. +type MapperValue struct { + vmap MapIter +} + +// NewMapValue generates a MapperValue instance. +func NewMapperValue(vmap MapIter) *MapperValue { + res := &MapperValue{ + vmap: vmap, + } + + return res +} + +func (vl *MapperValue) estimateSize() (int, error) { + return __PackMap(nil, vl.vmap) +} + +func (vl *MapperValue) write(cmd BufferEx) (int, error) { + return __PackMap(cmd, vl.vmap) +} + +func (vl *MapperValue) pack(cmd BufferEx) (int, error) { + return __PackMap(cmd, vl.vmap) +} + +// GetType returns wire protocol value type. +func (vl *MapperValue) GetType() int { + return ParticleType.MAP +} + +// GetObject returns original value as an interface{}. +func (vl *MapperValue) GetObject() interface{} { + return vl.vmap +} + +func (vl *MapperValue) String() string { + return fmt.Sprintf("%v", vl.vmap) +} + +/////////////////////////////////////////////////////////////////////////////// + +// GeoJSONValue encapsulates a 2D Geo point. +// Supported by Aerospike 3.6.1 servers only. 
+type GeoJSONValue string + +// NewMapValue generates a GeoJSONValue instance. +func NewGeoJSONValue(value string) GeoJSONValue { + res := GeoJSONValue(value) + return res +} + +func (vl GeoJSONValue) estimateSize() (int, error) { + // flags + ncells + jsonstr + return 1 + 2 + len(string(vl)), nil +} + +func (vl GeoJSONValue) write(cmd BufferEx) (int, error) { + cmd.WriteByte(0) // flags + cmd.WriteByte(0) // flags + cmd.WriteByte(0) // flags + + return cmd.WriteString(string(vl)) +} + +func (vl GeoJSONValue) pack(cmd BufferEx) (int, error) { + return __PackGeoJson(cmd, string(vl)) +} + +// GetType returns wire protocol value type. +func (vl GeoJSONValue) GetType() int { + return ParticleType.GEOJSON +} + +// GetObject returns original value as an interface{}. +func (vl GeoJSONValue) GetObject() interface{} { + return string(vl) +} + +// String implements Stringer interface. +func (vl GeoJSONValue) String() string { + return string(vl) +} + +////////////////////////////////////////////////////////////////////////////// + +func bytesToParticle(ptype int, buf []byte, offset int, length int) (interface{}, error) { + + switch ptype { + case ParticleType.INTEGER: + // return `int` for 64bit platforms for compatibility reasons + if Buffer.Arch64Bits { + return int(Buffer.VarBytesToInt64(buf, offset, length)), nil + } + return Buffer.VarBytesToInt64(buf, offset, length), nil + + case ParticleType.STRING: + return string(buf[offset : offset+length]), nil + + case ParticleType.FLOAT: + return Buffer.BytesToFloat64(buf, offset), nil + + case ParticleType.MAP: + return newUnpacker(buf, offset, length).UnpackMap() + + case ParticleType.LIST: + return newUnpacker(buf, offset, length).UnpackList() + + case ParticleType.GEOJSON: + ncells := int(Buffer.BytesToInt16(buf, offset+1)) + headerSize := 1 + 2 + (ncells * 8) + return string(buf[offset+headerSize : offset+length]), nil + + case ParticleType.BLOB: + newObj := make([]byte, length) + copy(newObj, buf[offset:offset+length]) + 
return newObj, nil + + case ParticleType.LDT: + return newUnpacker(buf, offset, length).unpackObjects() + + } + return nil, nil +} + +func bytesToKeyValue(pType int, buf []byte, offset int, len int) (Value, error) { + + switch pType { + case ParticleType.STRING: + return NewStringValue(string(buf[offset : offset+len])), nil + + case ParticleType.INTEGER: + return NewLongValue(Buffer.VarBytesToInt64(buf, offset, len)), nil + + case ParticleType.FLOAT: + return NewFloatValue(Buffer.BytesToFloat64(buf, offset)), nil + + case ParticleType.BLOB: + bytes := make([]byte, len, len) + copy(bytes, buf[offset:offset+len]) + return NewBytesValue(bytes), nil + + default: + return nil, NewAerospikeError(PARSE_ERROR, fmt.Sprintf("ParticleType %d not recognized. Please file a github issue.", pType)) + } +} + +func unwrapValue(v interface{}) interface{} { + if v == nil { + return nil + } + + if uv, ok := v.(Value); ok { + return unwrapValue(uv.GetObject()) + } else if uv, ok := v.([]Value); ok { + a := make([]interface{}, len(uv)) + for i := range uv { + a[i] = unwrapValue(uv[i].GetObject()) + } + return a + } + + return v +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/value_helpers.go b/vendor/github.com/aerospike/aerospike-client-go/value_helpers.go new file mode 100644 index 00000000000..588efe979c5 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/value_helpers.go @@ -0,0 +1,59 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +// MapIter allows to define general maps of your own type to be used in the Go client +// without the use of reflection. +// function PackMap should be exactly Like the following (Do not change, just copy/paste and adapt PackXXX methods): +// func (cm *CustomMap) PackMap(buf aerospike.BufferEx) (int, error) { +// size := 0 +// for k, v := range cm { +// n, err := PackXXX(buf, k) +// size += n +// if err != nil { +// return size, err +// } + +// n, err = PackXXX(buf, v) +// size += n +// if err != nil { +// return size, err +// } +// } +// return size, nil +// } +type MapIter interface { + PackMap(buf BufferEx) (int, error) + Len() int +} + +// ListIter allows to define general maps of your own type to be used in the Go client +// without the use of reflection. +// function PackList should be exactly Like the following (Do not change, just copy/paste and adapt PackXXX methods): +// func (cs *CustomSlice) PackList(buf aerospike.BufferEx) (int, error) { +// size := 0 +// for _, elem := range cs { +// n, err := PackXXX(buf, elem) +// size += n +// if err != nil { +// return size, err +// } +// } +// return size, nil +// } +type ListIter interface { + PackList(buf BufferEx) (int, error) + Len() int +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/value_reflect.go b/vendor/github.com/aerospike/aerospike-client-go/value_reflect.go new file mode 100644 index 00000000000..a6605fa89d9 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/value_reflect.go @@ -0,0 +1,56 @@ +// +build !as_performance + +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + "reflect" +) + +func init() { + newValueReflect = concreteNewValueReflect +} + +func concreteNewValueReflect(v interface{}) Value { + // check for array and map + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Array, reflect.Slice: + l := rv.Len() + arr := make([]interface{}, l) + for i := 0; i < l; i++ { + arr[i] = rv.Index(i).Interface() + } + + return NewListValue(arr) + case reflect.Map: + l := rv.Len() + amap := make(map[interface{}]interface{}, l) + for _, i := range rv.MapKeys() { + amap[i.Interface()] = rv.MapIndex(i).Interface() + } + + return NewMapValue(amap) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return NewLongValue(reflect.ValueOf(v).Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return NewLongValue(int64(reflect.ValueOf(v).Uint())) + case reflect.String: + return NewStringValue(rv.String()) + } + + return nil +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/write_command.go b/vendor/github.com/aerospike/aerospike-client-go/write_command.go new file mode 100644 index 00000000000..4dc5be7bb75 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/write_command.go @@ -0,0 +1,90 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import ( + . "github.com/aerospike/aerospike-client-go/types" + Buffer "github.com/aerospike/aerospike-client-go/utils/buffer" +) + +// guarantee writeCommand implements command interface +var _ command = &writeCommand{} + +type writeCommand struct { + singleCommand + + policy *WritePolicy + bins []*Bin + binMap BinMap + operation OperationType +} + +func newWriteCommand(cluster *Cluster, + policy *WritePolicy, + key *Key, + bins []*Bin, + binMap BinMap, + operation OperationType) *writeCommand { + + newWriteCmd := &writeCommand{ + singleCommand: newSingleCommand(cluster, key), + policy: policy, + bins: bins, + binMap: binMap, + operation: operation, + } + + return newWriteCmd +} + +func (cmd *writeCommand) getPolicy(ifc command) Policy { + return cmd.policy +} + +func (cmd *writeCommand) writeBuffer(ifc command) error { + return cmd.setWrite(cmd.policy, cmd.operation, cmd.key, cmd.bins, cmd.binMap) +} + +func (cmd *writeCommand) getNode(ifc command) (*Node, error) { + return cmd.cluster.getMasterNode(&cmd.partition) +} + +func (cmd *writeCommand) parseResult(ifc command, conn *Connection) error { + // Read header. 
+ if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil { + return err + } + + header := Buffer.BytesToInt64(cmd.dataBuffer, 0) + + // Validate header to make sure we are at the beginning of a message + if err := cmd.validateHeader(header); err != nil { + return err + } + + resultCode := cmd.dataBuffer[13] & 0xFF + + if resultCode != 0 { + return NewAerospikeError(ResultCode(resultCode)) + } + if err := cmd.emptySocket(conn); err != nil { + return err + } + return nil +} + +func (cmd *writeCommand) Execute() error { + return cmd.execute(cmd) +} diff --git a/vendor/github.com/aerospike/aerospike-client-go/write_policy.go b/vendor/github.com/aerospike/aerospike-client-go/write_policy.go new file mode 100644 index 00000000000..53b317f2681 --- /dev/null +++ b/vendor/github.com/aerospike/aerospike-client-go/write_policy.go @@ -0,0 +1,92 @@ +// Copyright 2013-2017 Aerospike, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aerospike + +import "math" + +const ( + // TTLServerDefault will default to namespace configuration variable "default-ttl" on the server. + TTLServerDefault = 0 + // TTLDontExpire will never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server. + TTLDontExpire = math.MaxUint32 + // TTLDontUpdate will not change the record's ttl when record is written. 
Supported by Aerospike server versions >= 3.10.1 + TTLDontUpdate = math.MaxUint32 - 1 +) + +// WritePolicy encapsulates parameters for policy attributes used in write operations. +// This object is passed into methods where database writes can occur. +type WritePolicy struct { + BasePolicy + + // RecordExistsAction qualifies how to handle writes where the record already exists. + RecordExistsAction RecordExistsAction //= RecordExistsAction.UPDATE; + + // GenerationPolicy qualifies how to handle record writes based on record generation. The default (NONE) + // indicates that the generation is not used to restrict writes. + GenerationPolicy GenerationPolicy //= GenerationPolicy.NONE; + + // Desired consistency guarantee when committing a transaction on the server. The default + // (COMMIT_ALL) indicates that the server should wait for master and all replica commits to + // be successful before returning success to the client. + CommitLevel CommitLevel //= COMMIT_ALL + + // Generation determines expected generation. + // Generation is the number of times a record has been + // modified (including creation) on the server. + // If a write operation is creating a record, the expected generation would be 0. + Generation uint32 + + // Expiration determines record expiration in seconds. Also known as TTL (Time-To-Live). + // Seconds record will live before being removed by the server. + // Expiration values: + // TTLServerDefault (0): Default to namespace configuration variable "default-ttl" on the server. + // TTLDontExpire (MaxUint32): Never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server + // TTLDontUpdate (MaxUint32 - 1): Do not change ttl when record is written. Supported by Aerospike server versions >= 3.10.1 + // > 0: Actual expiration in seconds. + Expiration uint32 + + // Send user defined key in addition to hash digest on a record put. + // The default is to not send the user defined key. 
+ SendKey bool + + // RespondPerEachOp defines for client.Operate() method, return a result for every operation. + // Some list operations do not return results by default (ListClearOp() for example). + // This can sometimes make it difficult to determine the desired result offset in the returned + // bin's result list. + // + // Setting RespondPerEachOp to true makes it easier to identify the desired result offset + // (result offset equals bin's operate sequence). This only makes sense when multiple list + // operations are used in one operate call and some of those operations do not return results + // by default. + RespondPerEachOp bool + + // DurableDelete leaves a tombstone for the record if the transaction results in a record deletion. + // This prevents deleted records from reappearing after node failures. + // Valid for Aerospike Server Enterprise Edition 4+ only. + DurableDelete bool +} + +// NewWritePolicy initializes a new WritePolicy instance with default parameters. +func NewWritePolicy(generation, expiration uint32) *WritePolicy { + return &WritePolicy{ + BasePolicy: *NewPolicy(), + RecordExistsAction: UPDATE, + GenerationPolicy: NONE, + CommitLevel: COMMIT_ALL, + Generation: generation, + Expiration: expiration, + SendKey: false, + } +} diff --git a/vendor/github.com/yuin/gopher-lua/LICENSE b/vendor/github.com/yuin/gopher-lua/LICENSE new file mode 100644 index 00000000000..4daf480a2fd --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following 
conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/yuin/gopher-lua/Makefile b/vendor/github.com/yuin/gopher-lua/Makefile new file mode 100644 index 00000000000..c5835e2ccb7 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/Makefile @@ -0,0 +1,10 @@ +.PHONY: build test + +build: + ./_tools/go-inline *.go && go fmt . && go build + +glua: *.go pm/*.go cmd/glua/glua.go + ./_tools/go-inline *.go && go fmt . && go build cmd/glua/glua.go + +test: + ./_tools/go-inline *.go && go fmt . && go test diff --git a/vendor/github.com/yuin/gopher-lua/README.rst b/vendor/github.com/yuin/gopher-lua/README.rst new file mode 100644 index 00000000000..dde3f540c61 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/README.rst @@ -0,0 +1,800 @@ +=============================================================================== +GopherLua: VM and compiler for Lua in Go. +=============================================================================== + +.. image:: https://godoc.org/github.com/yuin/gopher-lua?status.svg + :target: http://godoc.org/github.com/yuin/gopher-lua + +.. image:: https://travis-ci.org/yuin/gopher-lua.svg + :target: https://travis-ci.org/yuin/gopher-lua + +.. image:: https://coveralls.io/repos/yuin/gopher-lua/badge.svg + :target: https://coveralls.io/r/yuin/gopher-lua + +.. 
image:: https://badges.gitter.im/Join%20Chat.svg + :alt: Join the chat at https://gitter.im/yuin/gopher-lua + :target: https://gitter.im/yuin/gopher-lua?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + +| + + +GopherLua is a Lua5.1 VM and compiler written in Go. GopherLua has a same goal +with Lua: **Be a scripting language with extensible semantics** . It provides +Go APIs that allow you to easily embed a scripting language to your Go host +programs. + +.. contents:: + :depth: 1 + +---------------------------------------------------------------- +Design principle +---------------------------------------------------------------- + +- Be a scripting language with extensible semantics. +- User-friendly Go API + - The stack based API like the one used in the original Lua + implementation will cause a performance improvements in GopherLua + (It will reduce memory allocations and concrete type <-> interface conversions). + GopherLua API is **not** the stack based API. + GopherLua give preference to the user-friendliness over the performance. + +---------------------------------------------------------------- +How about performance? +---------------------------------------------------------------- +GopherLua is not fast but not too slow, I think. + +GopherLua has almost equivalent ( or little bit better ) performance as Python3 on micro benchmarks. + +There are some benchmarks on the `wiki page `_ . + +---------------------------------------------------------------- +Installation +---------------------------------------------------------------- + +.. code-block:: bash + + go get github.com/yuin/gopher-lua + +GopherLua supports >= Go1.6. 
+ +---------------------------------------------------------------- +Usage +---------------------------------------------------------------- +GopherLua APIs perform in much the same way as Lua, **but the stack is used only +for passing arguments and receiving returned values.** + +GopherLua supports channel operations. See **"Goroutines"** section. + +Import a package. + +.. code-block:: go + + import ( + "github.com/yuin/gopher-lua" + ) + +Run scripts in the VM. + +.. code-block:: go + + L := lua.NewState() + defer L.Close() + if err := L.DoString(`print("hello")`); err != nil { + panic(err) + } + +.. code-block:: go + + L := lua.NewState() + defer L.Close() + if err := L.DoFile("hello.lua"); err != nil { + panic(err) + } + +Refer to `Lua Reference Manual `_ and `Go doc `_ for further information. + +Note that elements that are not commented in `Go doc `_ equivalent to `Lua Reference Manual `_ , except GopherLua uses objects instead of Lua stack indices. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Data model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +All data in a GopherLua program is an ``LValue`` . ``LValue`` is an interface +type that has following methods. 
+ +- ``String() string`` +- ``Type() LValueType`` + + +Objects implement an LValue interface are + +================ ========================= ================== ======================= + Type name Go type Type() value Constants +================ ========================= ================== ======================= + ``LNilType`` (constants) ``LTNil`` ``LNil`` + ``LBool`` (constants) ``LTBool`` ``LTrue``, ``LFalse`` + ``LNumber`` float64 ``LTNumber`` ``-`` + ``LString`` string ``LTString`` ``-`` + ``LFunction`` struct pointer ``LTFunction`` ``-`` + ``LUserData`` struct pointer ``LTUserData`` ``-`` + ``LState`` struct pointer ``LTThread`` ``-`` + ``LTable`` struct pointer ``LTTable`` ``-`` + ``LChannel`` chan LValue ``LTChannel`` ``-`` +================ ========================= ================== ======================= + +You can test an object type in Go way(type assertion) or using a ``Type()`` value. + +.. code-block:: go + + lv := L.Get(-1) // get the value at the top of the stack + if str, ok := lv.(lua.LString); ok { + // lv is LString + fmt.Println(string(str)) + } + if lv.Type() != lua.LTString { + panic("string required.") + } + +.. code-block:: go + + lv := L.Get(-1) // get the value at the top of the stack + if tbl, ok := lv.(*lua.LTable); ok { + // lv is LTable + fmt.Println(L.ObjLen(tbl)) + } + +Note that ``LBool`` , ``LNumber`` , ``LString`` is not a pointer. + +To test ``LNilType`` and ``LBool``, You **must** use pre-defined constants. + +.. code-block:: go + + lv := L.Get(-1) // get the value at the top of the stack + + if lv == lua.LTrue { // correct + } + + if bl, ok := lv.(lua.LBool); ok && bool(bl) { // wrong + } + +In Lua, both ``nil`` and ``false`` make a condition false. ``LVIsFalse`` and ``LVAsBool`` implement this specification. + +.. 
code-block:: go + + lv := L.Get(-1) // get the value at the top of the stack + if lua.LVIsFalse(lv) { // lv is nil or false + } + + if lua.LVAsBool(lv) { // lv is neither nil nor false + } + +Objects that based on go structs(``LFunction``. ``LUserData``, ``LTable``) +have some public methods and fields. You can use these methods and fields for +performance and debugging, but there are some limitations. + +- Metatable does not work. +- No error handlings. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Callstack & Registry size +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Size of the callstack & registry is **fixed** for mainly performance. +You can change the default size of the callstack & registry. + +.. code-block:: go + + lua.RegistrySize = 1024 * 20 + lua.CallStackSize = 1024 + L := lua.NewState() + defer L.Close() + +You can also create an LState object that has the callstack & registry size specified by ``Options`` . + +.. code-block:: go + + L := lua.NewState(lua.Options{ + CallStackSize: 120, + RegistrySize: 120*20, + }) + +An LState object that has been created by ``*LState#NewThread()`` inherits the callstack & registry size from the parent LState object. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Miscellaneous lua.NewState options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- **Options.SkipOpenLibs bool(default false)** + - By default, GopherLua opens all built-in libraries when new LState is created. + - You can skip this behaviour by setting this to ``true`` . + - Using the various `OpenXXX(L *LState) int` functions you can open only those libraries that you require, for an example see below. +- **Options.IncludeGoStackTrace bool(default false)** + - By default, GopherLua does not show Go stack traces when panics occur. + - You can get Go stack traces by setting this to ``true`` . 
+ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Refer to `Lua Reference Manual `_ and `Go doc(LState methods) `_ for further information. + ++++++++++++++++++++++++++++++++++++++++++ +Calling Go from Lua ++++++++++++++++++++++++++++++++++++++++++ + +.. code-block:: go + + func Double(L *lua.LState) int { + lv := L.ToInt(1) /* get argument */ + L.Push(lua.LNumber(lv * 2)) /* push result */ + return 1 /* number of results */ + } + + func main() { + L := lua.NewState() + defer L.Close() + L.SetGlobal("double", L.NewFunction(Double)) /* Original lua_setglobal uses stack... */ + } + +.. code-block:: lua + + print(double(20)) -- > "40" + +Any function registered with GopherLua is a ``lua.LGFunction``, defined in ``value.go`` + +.. code-block:: go + + type LGFunction func(*LState) int + +Working with coroutines. + +.. code-block:: go + + co, _ := L.NewThread() /* create a new thread */ + fn := L.GetGlobal("coro").(*lua.LFunction) /* get function from lua */ + for { + st, err, values := L.Resume(co, fn) + if st == lua.ResumeError { + fmt.Println("yield break(error)") + fmt.Println(err.Error()) + break + } + + for i, lv := range values { + fmt.Printf("%v : %v\n", i, lv) + } + + if st == lua.ResumeOK { + fmt.Println("yield break(ok)") + break + } + } + ++++++++++++++++++++++++++++++++++++++++++ +Opening a subset of builtin modules ++++++++++++++++++++++++++++++++++++++++++ + +The following demonstrates how to open a subset of the built-in modules in Lua, say for example to avoid enabling modules with access to local files or system calls. + +main.go + +.. 
code-block:: go + + func main() { + L := lua.NewState(lua.Options{SkipOpenLibs: true}) + defer L.Close() + for _, pair := range []struct { + n string + f lua.LGFunction + }{ + {lua.LoadLibName, lua.OpenPackage}, // Must be first + {lua.BaseLibName, lua.OpenBase}, + {lua.TabLibName, lua.OpenTable}, + } { + if err := L.CallByParam(lua.P{ + Fn: L.NewFunction(pair.f), + NRet: 0, + Protect: true, + }, lua.LString(pair.n)); err != nil { + panic(err) + } + } + if err := L.DoFile("main.lua"); err != nil { + panic(err) + } + } + ++++++++++++++++++++++++++++++++++++++++++ +Creating a module by Go ++++++++++++++++++++++++++++++++++++++++++ + +mymodule.go + +.. code-block:: go + + package mymodule + + import ( + "github.com/yuin/gopher-lua" + ) + + func Loader(L *lua.LState) int { + // register functions to the table + mod := L.SetFuncs(L.NewTable(), exports) + // register other stuff + L.SetField(mod, "name", lua.LString("value")) + + // returns the module + L.Push(mod) + return 1 + } + + var exports = map[string]lua.LGFunction{ + "myfunc": myfunc, + } + + func myfunc(L *lua.LState) int { + return 0 + } + +mymain.go + +.. code-block:: go + + package main + + import ( + "./mymodule" + "github.com/yuin/gopher-lua" + ) + + func main() { + L := lua.NewState() + defer L.Close() + L.PreloadModule("mymodule", mymodule.Loader) + if err := L.DoFile("main.lua"); err != nil { + panic(err) + } + } + +main.lua + +.. code-block:: lua + + local m = require("mymodule") + m.myfunc() + print(m.name) + + ++++++++++++++++++++++++++++++++++++++++++ +Calling Lua from Go ++++++++++++++++++++++++++++++++++++++++++ + +.. 
code-block:: go + + L := lua.NewState() + defer L.Close() + if err := L.DoFile("double.lua"); err != nil { + panic(err) + } + if err := L.CallByParam(lua.P{ + Fn: L.GetGlobal("double"), + NRet: 1, + Protect: true, + }, lua.LNumber(10)); err != nil { + panic(err) + } + ret := L.Get(-1) // returned value + L.Pop(1) // remove received value + +If ``Protect`` is false, GopherLua will panic instead of returning an ``error`` value. + ++++++++++++++++++++++++++++++++++++++++++ +User-Defined types ++++++++++++++++++++++++++++++++++++++++++ +You can extend GopherLua with new types written in Go. +``LUserData`` is provided for this purpose. + +.. code-block:: go + + type Person struct { + Name string + } + + const luaPersonTypeName = "person" + + // Registers my person type to given L. + func registerPersonType(L *lua.LState) { + mt := L.NewTypeMetatable(luaPersonTypeName) + L.SetGlobal("person", mt) + // static attributes + L.SetField(mt, "new", L.NewFunction(newPerson)) + // methods + L.SetField(mt, "__index", L.SetFuncs(L.NewTable(), personMethods)) + } + + // Constructor + func newPerson(L *lua.LState) int { + person := &Person{L.CheckString(1)} + ud := L.NewUserData() + ud.Value = person + L.SetMetatable(ud, L.GetTypeMetatable(luaPersonTypeName)) + L.Push(ud) + return 1 + } + + // Checks whether the first lua argument is a *LUserData with *Person and returns this *Person. 
+ func checkPerson(L *lua.LState) *Person { + ud := L.CheckUserData(1) + if v, ok := ud.Value.(*Person); ok { + return v + } + L.ArgError(1, "person expected") + return nil + } + + var personMethods = map[string]lua.LGFunction{ + "name": personGetSetName, + } + + // Getter and setter for the Person#Name + func personGetSetName(L *lua.LState) int { + p := checkPerson(L) + if L.GetTop() == 2 { + p.Name = L.CheckString(2) + return 0 + } + L.Push(lua.LString(p.Name)) + return 1 + } + + func main() { + L := lua.NewState() + defer L.Close() + registerPersonType(L) + if err := L.DoString(` + p = person.new("Steeve") + print(p:name()) -- "Steeve" + p:name("Alice") + print(p:name()) -- "Alice" + `); err != nil { + panic(err) + } + } + ++++++++++++++++++++++++++++++++++++++++++ +Terminating a running LState ++++++++++++++++++++++++++++++++++++++++++ +GopherLua supports the `Go Concurrency Patterns: Context `_ . + + +.. code-block:: go + + L := lua.NewState() + defer L.Close() + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + // set the context to our LState + L.SetContext(ctx) + err := L.DoString(` + local clock = os.clock + function sleep(n) -- seconds + local t0 = clock() + while clock() - t0 <= n do end + end + sleep(3) + `) + // err.Error() contains "context deadline exceeded" + +With coroutines + +.. code-block:: go + + L := lua.NewState() + defer L.Close() + ctx, cancel := context.WithCancel(context.Background()) + L.SetContext(ctx) + defer cancel() + L.DoString(` + function coro() + local i = 0 + while true do + coroutine.yield(i) + i = i+1 + end + return i + end + `) + co, cocancel := L.NewThread() + defer cocancel() + fn := L.GetGlobal("coro").(*LFunction) + + _, err, values := L.Resume(co, fn) // err is nil + + cancel() // cancel the parent context + + _, err, values = L.Resume(co, fn) // err is NOT nil : child context was canceled + +**Note that using a context causes performance degradation.** + +.. 
code-block:: + + time ./glua-with-context.exe fib.lua + 9227465 + 0.01s user 0.11s system 1% cpu 7.505 total + + time ./glua-without-context.exe fib.lua + 9227465 + 0.01s user 0.01s system 0% cpu 5.306 total + + ++++++++++++++++++++++++++++++++++++++++++ +Goroutines ++++++++++++++++++++++++++++++++++++++++++ +The ``LState`` is not goroutine-safe. It is recommended to use one LState per goroutine and communicate between goroutines by using channels. + +Channels are represented by ``channel`` objects in GopherLua. And a ``channel`` table provides functions for performing channel operations. + +Some objects can not be sent over channels due to having non-goroutine-safe objects inside itself. + +- a thread(state) +- a function +- an userdata +- a table with a metatable + +You **must not** send these objects from Go APIs to channels. + + + +.. code-block:: go + + func receiver(ch, quit chan lua.LValue) { + L := lua.NewState() + defer L.Close() + L.SetGlobal("ch", lua.LChannel(ch)) + L.SetGlobal("quit", lua.LChannel(quit)) + if err := L.DoString(` + local exit = false + while not exit do + channel.select( + {"|<-", ch, function(ok, v) + if not ok then + print("channel closed") + exit = true + else + print("received:", v) + end + end}, + {"|<-", quit, function(ok, v) + print("quit") + exit = true + end} + ) + end + `); err != nil { + panic(err) + } + } + + func sender(ch, quit chan lua.LValue) { + L := lua.NewState() + defer L.Close() + L.SetGlobal("ch", lua.LChannel(ch)) + L.SetGlobal("quit", lua.LChannel(quit)) + if err := L.DoString(` + ch:send("1") + ch:send("2") + `); err != nil { + panic(err) + } + ch <- lua.LString("3") + quit <- lua.LTrue + } + + func main() { + ch := make(chan lua.LValue) + quit := make(chan lua.LValue) + go receiver(ch, quit) + go sender(ch, quit) + time.Sleep(3 * time.Second) + } + +''''''''''''''' +Go API +''''''''''''''' + +``ToChannel``, ``CheckChannel``, ``OptChannel`` are available. 
+ +Refer to `Go doc(LState methods) `_ for further information. + +''''''''''''''' +Lua API +''''''''''''''' + +- **channel.make([buf:int]) -> ch:channel** + - Create new channel that has a buffer size of ``buf``. By default, ``buf`` is 0. + +- **channel.select(case:table [, case:table, case:table ...]) -> {index:int, recv:any, ok}** + - Same as the ``select`` statement in Go. It returns the index of the chosen case and, if that + case was a receive operation, the value received and a boolean indicating whether the channel has been closed. + - ``case`` is a table that outlined below. + - receiving: `{"|<-", ch:channel [, handler:func(ok, data:any)]}` + - sending: `{"<-|", ch:channel, data:any [, handler:func(data:any)]}` + - default: `{"default" [, handler:func()]}` + +``channel.select`` examples: + +.. code-block:: lua + + local idx, recv, ok = channel.select( + {"|<-", ch1}, + {"|<-", ch2} + ) + if not ok then + print("closed") + elseif idx == 1 then -- received from ch1 + print(recv) + elseif idx == 2 then -- received from ch2 + print(recv) + end + +.. code-block:: lua + + channel.select( + {"|<-", ch1, function(ok, data) + print(ok, data) + end}, + {"<-|", ch2, "value", function(data) + print(data) + end}, + {"default", function() + print("default action") + end} + ) + +- **channel:send(data:any)** + - Send ``data`` over the channel. +- **channel:receive() -> ok:bool, data:any** + - Receive some data over the channel. +- **channel:close()** + - Close the channel. + +'''''''''''''''''''''''''''''' +The LState pool pattern +'''''''''''''''''''''''''''''' +To create per-thread LState instances, You can use the ``sync.Pool`` like mechanism. + +.. 
code-block:: go + + type lStatePool struct { + m sync.Mutex + saved []*lua.LState + } + + func (pl *lStatePool) Get() *lua.LState { + pl.m.Lock() + defer pl.m.Unlock() + n := len(pl.saved) + if n == 0 { + return pl.New() + } + x := pl.saved[n-1] + pl.saved = pl.saved[0 : n-1] + return x + } + + func (pl *lStatePool) New() *lua.LState { + L := lua.NewState() + // setting the L up here. + // load scripts, set global variables, share channels, etc... + return L + } + + func (pl *lStatePool) Put(L *lua.LState) { + pl.m.Lock() + defer pl.m.Unlock() + pl.saved = append(pl.saved, L) + } + + func (pl *lStatePool) Shutdown() { + for _, L := range pl.saved { + L.Close() + } + } + + // Global LState pool + var luaPool = &lStatePool{ + saved: make([]*lua.LState, 0, 4), + } + +Now, you can get per-thread LState objects from the ``luaPool`` . + +.. code-block:: go + + func MyWorker() { + L := luaPool.Get() + defer luaPool.Put(L) + /* your code here */ + } + + func main() { + defer luaPool.Shutdown() + go MyWorker() + go MyWorker() + /* etc... */ + } + + +---------------------------------------------------------------- +Differences between Lua and GopherLua +---------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Goroutines +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- GopherLua supports channel operations. + - GopherLua has a type named ``channel``. + - The ``channel`` table provides functions for performing channel operations. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Unsupported functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ``string.dump`` +- ``os.setlocale`` +- ``lua_Debug.namewhat`` +- ``package.loadlib`` +- debug hooks + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Miscellaneous notes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ``collectgarbage`` does not take any arguments and runs the garbage collector for the entire Go program. 
+- ``file:setvbuf`` does not support a line buffering. +- Daylight saving time is not supported. +- GopherLua has a function to set an environment variable : ``os.setenv(name, value)`` + +---------------------------------------------------------------- +Standalone interpreter +---------------------------------------------------------------- +Lua has an interpreter called ``lua`` . GopherLua has an interpreter called ``glua`` . + +.. code-block:: bash + + go get github.com/yuin/gopher-lua/cmd/glua + +``glua`` has same options as ``lua`` . + +---------------------------------------------------------------- +How to Contribute +---------------------------------------------------------------- +See `Guidlines for contributors `_ . + +---------------------------------------------------------------- +Libraries for GopherLua +---------------------------------------------------------------- + +- `gopher-luar `_ : Custom type reflection for gopher-lua +- `gluamapper `_ : Mapping a Lua table to a Go struct +- `gluare `_ : Regular expressions for gopher-lua +- `gluahttp `_ : HTTP request module for gopher-lua +- `gopher-json `_ : A simple JSON encoder/decoder for gopher-lua +- `gluayaml `_ : Yaml parser for gopher-lua +- `glua-lfs `_ : Partially implements the luafilesystem module for gopher-lua +- `gluaurl `_ : A url parser/builder module for gopher-lua +- `gluahttpscrape `_ : A simple HTML scraper module for gopher-lua +- `gluaxmlpath `_ : An xmlpath module for gopher-lua + +---------------------------------------------------------------- +Donation +---------------------------------------------------------------- + +BTC: 1NEDSyUmo4SMTDP83JJQSWi1MvQUGGNMZB + +---------------------------------------------------------------- +License +---------------------------------------------------------------- +MIT + +---------------------------------------------------------------- +Author +---------------------------------------------------------------- +Yusuke Inuzuka diff --git 
a/vendor/github.com/yuin/gopher-lua/_state.go b/vendor/github.com/yuin/gopher-lua/_state.go new file mode 100644 index 00000000000..247c87c9fbb --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/_state.go @@ -0,0 +1,1785 @@ +package lua + +import ( + "fmt" + "github.com/yuin/gopher-lua/parse" + "golang.org/x/net/context" + "io" + "math" + "os" + "runtime" + "strings" + "sync/atomic" + "time" +) + +const MultRet = -1 +const RegistryIndex = -10000 +const EnvironIndex = -10001 +const GlobalsIndex = -10002 + +/* ApiError {{{ */ + +type ApiError struct { + Type ApiErrorType + Object LValue + StackTrace string + // Underlying error. This attribute is set only if the Type is ApiErrorFile or ApiErrorSyntax + Cause error +} + +func newApiError(code ApiErrorType, object LValue) *ApiError { + return &ApiError{code, object, "", nil} +} + +func newApiErrorS(code ApiErrorType, message string) *ApiError { + return newApiError(code, LString(message)) +} + +func newApiErrorE(code ApiErrorType, err error) *ApiError { + return &ApiError{code, LString(err.Error()), "", err} +} + +func (e *ApiError) Error() string { + if len(e.StackTrace) > 0 { + return fmt.Sprintf("%s\n%s", e.Object.String(), e.StackTrace) + } + return e.Object.String() +} + +type ApiErrorType int + +const ( + ApiErrorSyntax ApiErrorType = iota + ApiErrorFile + ApiErrorRun + ApiErrorError + ApiErrorPanic +) + +/* }}} */ + +/* ResumeState {{{ */ + +type ResumeState int + +const ( + ResumeOK ResumeState = iota + ResumeYield + ResumeError +) + +/* }}} */ + +/* P {{{ */ + +type P struct { + Fn LValue + NRet int + Protect bool + Handler *LFunction +} + +/* }}} */ + +/* Options {{{ */ + +// Options is a configuration that is used to create a new LState. +type Options struct { + // Call stack size. This defaults to `lua.CallStackSize`. + CallStackSize int + // Data stack size. This defaults to `lua.RegistrySize`. 
+ RegistrySize int + // Controls whether or not libraries are opened by default + SkipOpenLibs bool + // Tells whether a Go stacktrace should be included in a Lua stacktrace when panics occur. + IncludeGoStackTrace bool +} + +/* }}} */ + +/* Debug {{{ */ + +type Debug struct { + frame *callFrame + Name string + What string + Source string + CurrentLine int + NUpvalues int + LineDefined int + LastLineDefined int +} + +/* }}} */ + +/* callFrame {{{ */ + +type callFrame struct { + Idx int + Fn *LFunction + Parent *callFrame + Pc int + Base int + LocalBase int + ReturnBase int + NArgs int + NRet int + TailCall int +} + +type callFrameStack struct { + array []callFrame + sp int +} + +func newCallFrameStack(size int) *callFrameStack { + return &callFrameStack{ + array: make([]callFrame, size), + sp: 0, + } +} + +func (cs *callFrameStack) IsEmpty() bool { return cs.sp == 0 } + +func (cs *callFrameStack) Clear() { + cs.sp = 0 +} + +func (cs *callFrameStack) Push(v callFrame) { // +inline-start + cs.array[cs.sp] = v + cs.array[cs.sp].Idx = cs.sp + cs.sp++ +} // +inline-end + +func (cs *callFrameStack) Remove(sp int) { + psp := sp - 1 + nsp := sp + 1 + var pre *callFrame + var next *callFrame + if psp > 0 { + pre = &cs.array[psp] + } + if nsp < cs.sp { + next = &cs.array[nsp] + } + if next != nil { + next.Parent = pre + } + for i := sp; i+1 < cs.sp; i++ { + cs.array[i] = cs.array[i+1] + cs.array[i].Idx = i + cs.sp = i + } + cs.sp++ +} + +func (cs *callFrameStack) Sp() int { + return cs.sp +} + +func (cs *callFrameStack) SetSp(sp int) { + cs.sp = sp +} + +func (cs *callFrameStack) Last() *callFrame { + if cs.sp == 0 { + return nil + } + return &cs.array[cs.sp-1] +} + +func (cs *callFrameStack) At(sp int) *callFrame { + return &cs.array[sp] +} + +func (cs *callFrameStack) Pop() *callFrame { + cs.sp-- + return &cs.array[cs.sp] +} + +/* }}} */ + +/* registry {{{ */ + +type registry struct { + array []LValue + top int + alloc *allocator +} + +func newRegistry(size int, alloc 
*allocator) *registry { + return ®istry{make([]LValue, size), 0, alloc} +} + +func (rg *registry) SetTop(top int) { + oldtop := rg.top + rg.top = top + for i := oldtop; i < rg.top; i++ { + rg.array[i] = LNil + } + for i := rg.top; i < oldtop; i++ { + rg.array[i] = LNil + } +} + +func (rg *registry) Top() int { + return rg.top +} + +func (rg *registry) Push(v LValue) { + rg.array[rg.top] = v + rg.top++ +} + +func (rg *registry) Pop() LValue { + v := rg.array[rg.top-1] + rg.array[rg.top-1] = LNil + rg.top-- + return v +} + +func (rg *registry) Get(reg int) LValue { + return rg.array[reg] +} + +func (rg *registry) CopyRange(regv, start, limit, n int) { // +inline-start + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n +} // +inline-end + +func (rg *registry) FillNil(regm, n int) { // +inline-start + for i := 0; i < n; i++ { + rg.array[regm+i] = LNil + } + rg.top = regm + n +} // +inline-end + +func (rg *registry) Insert(value LValue, reg int) { + top := rg.Top() + if reg >= top { + rg.Set(reg, value) + return + } + top-- + for ; top >= reg; top-- { + rg.Set(top+1, rg.Get(top)) + } + rg.Set(reg, value) +} + +func (rg *registry) Set(reg int, val LValue) { + rg.array[reg] = val + if reg >= rg.top { + rg.top = reg + 1 + } +} + +func (rg *registry) SetNumber(reg int, val LNumber) { + rg.array[reg] = rg.alloc.LNumber2I(val) + if reg >= rg.top { + rg.top = reg + 1 + } +} /* }}} */ + +/* Global {{{ */ + +func newGlobal() *Global { + return &Global{ + MainThread: nil, + Registry: newLTable(0, 32), + Global: newLTable(0, 64), + builtinMts: make(map[int]LValue), + tempFiles: make([]*os.File, 0, 10), + } +} + +/* }}} */ + +/* package local methods {{{ */ + +func panicWithTraceback(L *LState) { + err := newApiError(ApiErrorRun, L.Get(-1)) + err.StackTrace = L.stackTrace(0) + panic(err) +} + +func panicWithoutTraceback(L 
*LState) { + err := newApiError(ApiErrorRun, L.Get(-1)) + panic(err) +} + +func newLState(options Options) *LState { + al := newAllocator(32) + ls := &LState{ + G: newGlobal(), + Parent: nil, + Panic: panicWithTraceback, + Dead: false, + Options: options, + + stop: 0, + reg: newRegistry(options.RegistrySize, al), + stack: newCallFrameStack(options.CallStackSize), + alloc: al, + currentFrame: nil, + wrapped: false, + uvcache: nil, + hasErrorFunc: false, + mainLoop: mainLoop, + ctx: nil, + } + ls.Env = ls.G.Global + return ls +} + +func (ls *LState) printReg() { + println("-------------------------") + println("thread:", ls) + println("top:", ls.reg.Top()) + if ls.currentFrame != nil { + println("function base:", ls.currentFrame.Base) + println("return base:", ls.currentFrame.ReturnBase) + } else { + println("(vm not started)") + } + println("local base:", ls.currentLocalBase()) + for i := 0; i < ls.reg.Top(); i++ { + println(i, ls.reg.Get(i).String()) + } + println("-------------------------") +} + +func (ls *LState) printCallStack() { + println("-------------------------") + for i := 0; i < ls.stack.Sp(); i++ { + print(i) + print(" ") + frame := ls.stack.At(i) + if frame == nil { + break + } + if frame.Fn.IsG { + println("IsG:", true, "Frame:", frame, "Fn:", frame.Fn) + } else { + println("IsG:", false, "Frame:", frame, "Fn:", frame.Fn, "pc:", frame.Pc) + } + } + println("-------------------------") +} + +func (ls *LState) closeAllUpvalues() { // +inline-start + for cf := ls.currentFrame; cf != nil; cf = cf.Parent { + if !cf.Fn.IsG { + ls.closeUpvalues(cf.LocalBase) + } + } +} // +inline-end + +func (ls *LState) raiseError(level int, format string, args ...interface{}) { + if !ls.hasErrorFunc { + ls.closeAllUpvalues() + } + message := format + if len(args) > 0 { + message = fmt.Sprintf(format, args...) 
+ } + if level > 0 { + message = fmt.Sprintf("%v %v", ls.where(level-1, true), message) + } + ls.reg.Push(LString(message)) + ls.Panic(ls) +} + +func (ls *LState) findLocal(frame *callFrame, no int) string { + fn := frame.Fn + if !fn.IsG { + if name, ok := fn.LocalName(no, frame.Pc-1); ok { + return name + } + } + var top int + if ls.currentFrame == frame { + top = ls.reg.Top() + } else if frame.Idx+1 < ls.stack.Sp() { + top = ls.stack.At(frame.Idx + 1).Base + } else { + return "" + } + if top-frame.LocalBase >= no { + return "(*temporary)" + } + return "" +} + +func (ls *LState) where(level int, skipg bool) string { + dbg, ok := ls.GetStack(level) + if !ok { + return "" + } + cf := dbg.frame + proto := cf.Fn.Proto + sourcename := "[G]" + if proto != nil { + sourcename = proto.SourceName + } else if skipg { + return ls.where(level+1, skipg) + } + line := "" + if proto != nil { + line = fmt.Sprintf("%v:", proto.DbgSourcePositions[cf.Pc-1]) + } + return fmt.Sprintf("%v:%v", sourcename, line) +} + +func (ls *LState) stackTrace(level int) string { + buf := []string{} + header := "stack traceback:" + if ls.currentFrame != nil { + i := 0 + for dbg, ok := ls.GetStack(i); ok; dbg, ok = ls.GetStack(i) { + cf := dbg.frame + buf = append(buf, fmt.Sprintf("\t%v in %v", ls.Where(i), ls.formattedFrameFuncName(cf))) + if !cf.Fn.IsG && cf.TailCall > 0 { + for tc := cf.TailCall; tc > 0; tc-- { + buf = append(buf, "\t(tailcall): ?") + i++ + } + } + i++ + } + } + buf = append(buf, fmt.Sprintf("\t%v: %v", "[G]", "?")) + buf = buf[intMax(0, intMin(level, len(buf))):len(buf)] + if len(buf) > 20 { + newbuf := make([]string, 0, 20) + newbuf = append(newbuf, buf[0:7]...) + newbuf = append(newbuf, "\t...") + newbuf = append(newbuf, buf[len(buf)-7:len(buf)]...) 
+ buf = newbuf + } + return fmt.Sprintf("%s\n%s", header, strings.Join(buf, "\n")) +} + +func (ls *LState) formattedFrameFuncName(fr *callFrame) string { + name, ischunk := ls.frameFuncName(fr) + if ischunk { + return name + } + if name[0] != '(' && name[0] != '<' { + return fmt.Sprintf("function '%s'", name) + } + return fmt.Sprintf("function %s", name) +} + +func (ls *LState) rawFrameFuncName(fr *callFrame) string { + name, _ := ls.frameFuncName(fr) + return name +} + +func (ls *LState) frameFuncName(fr *callFrame) (string, bool) { + frame := fr.Parent + if frame == nil { + if ls.Parent == nil { + return "main chunk", true + } else { + return "corountine", true + } + } + if !frame.Fn.IsG { + pc := frame.Pc - 1 + for _, call := range frame.Fn.Proto.DbgCalls { + if call.Pc == pc { + name := call.Name + if (name == "?" || fr.TailCall > 0) && !fr.Fn.IsG { + name = fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined) + } + return name, false + } + } + } + if !fr.Fn.IsG { + return fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined), false + } + return "(anonymous)", false +} + +func (ls *LState) isStarted() bool { + return ls.currentFrame != nil +} + +func (ls *LState) kill() { + ls.Dead = true +} + +func (ls *LState) indexToReg(idx int) int { + base := ls.currentLocalBase() + if idx > 0 { + return base + idx - 1 + } else if idx == 0 { + return -1 + } else { + tidx := ls.reg.Top() + idx + if tidx < base { + return -1 + } + return tidx + } +} + +func (ls *LState) currentLocalBase() int { + base := 0 + if ls.currentFrame != nil { + base = ls.currentFrame.LocalBase + } + return base +} + +func (ls *LState) currentEnv() *LTable { + return ls.Env + /* + if ls.currentFrame == nil { + return ls.Env + } + return ls.currentFrame.Fn.Env + */ +} + +func (ls *LState) rkValue(idx int) LValue { + /* + if OpIsK(idx) { + return ls.currentFrame.Fn.Proto.Constants[opIndexK(idx)] + } + return ls.reg.Get(ls.currentFrame.LocalBase + idx) + */ + if 
(idx & opBitRk) != 0 { + return ls.currentFrame.Fn.Proto.Constants[idx & ^opBitRk] + } + return ls.reg.array[ls.currentFrame.LocalBase+idx] +} + +func (ls *LState) rkString(idx int) string { + if (idx & opBitRk) != 0 { + return ls.currentFrame.Fn.Proto.stringConstants[idx & ^opBitRk] + } + return string(ls.reg.array[ls.currentFrame.LocalBase+idx].(LString)) +} + +func (ls *LState) closeUpvalues(idx int) { // +inline-start + if ls.uvcache != nil { + var prev *Upvalue + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index >= idx { + if prev != nil { + prev.next = nil + } else { + ls.uvcache = nil + } + uv.Close() + } + prev = uv + } + } +} // +inline-end + +func (ls *LState) findUpvalue(idx int) *Upvalue { + var prev *Upvalue + var next *Upvalue + if ls.uvcache != nil { + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index == idx { + return uv + } + if uv.index > idx { + next = uv + break + } + prev = uv + } + } + uv := &Upvalue{reg: ls.reg, index: idx, closed: false} + if prev != nil { + prev.next = uv + } else { + ls.uvcache = uv + } + if next != nil { + uv.next = next + } + return uv +} + +func (ls *LState) metatable(lvalue LValue, rawget bool) LValue { + var metatable LValue = LNil + switch obj := lvalue.(type) { + case *LTable: + metatable = obj.Metatable + case *LUserData: + metatable = obj.Metatable + default: + if table, ok := ls.G.builtinMts[int(obj.Type())]; ok { + metatable = table + } + } + + if !rawget && metatable != LNil { + oldmt := metatable + if tb, ok := metatable.(*LTable); ok { + metatable = tb.RawGetString("__metatable") + if metatable == LNil { + metatable = oldmt + } + } + } + + return metatable +} + +func (ls *LState) metaOp1(lvalue LValue, event string) LValue { + if mt := ls.metatable(lvalue, true); mt != LNil { + if tb, ok := mt.(*LTable); ok { + return tb.RawGetString(event) + } + } + return LNil +} + +func (ls *LState) metaOp2(value1, value2 LValue, event string) LValue { + if mt := ls.metatable(value1, true); mt != 
LNil { + if tb, ok := mt.(*LTable); ok { + if ret := tb.RawGetString(event); ret != LNil { + return ret + } + } + } + if mt := ls.metatable(value2, true); mt != LNil { + if tb, ok := mt.(*LTable); ok { + return tb.RawGetString(event) + } + } + return LNil +} + +func (ls *LState) metaCall(lvalue LValue) (*LFunction, bool) { + if fn, ok := lvalue.(*LFunction); ok { + return fn, false + } + if fn, ok := ls.metaOp1(lvalue, "__call").(*LFunction); ok { + return fn, true + } + return nil, false +} + +func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start + if cf.Fn.IsG { + ls.reg.SetTop(cf.LocalBase + cf.NArgs) + } else { + proto := cf.Fn.Proto + nargs := cf.NArgs + np := int(proto.NumParameters) + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + nargs = np + } + + if (proto.IsVarArg & VarArgIsVarArg) == 0 { + if nargs < int(proto.NumUsedRegisters) { + nargs = int(proto.NumUsedRegisters) + } + for i := np; i < nargs; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } + ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters) + } else { + /* swap vararg positions: + closure + namedparam1 <- lbase + namedparam2 + vararg1 + vararg2 + + TO + + closure + nil + nil + vararg1 + vararg2 + namedparam1 <- lbase + namedparam2 + */ + nvarargs := nargs - np + if nvarargs < 0 { + nvarargs = 0 + } + + ls.reg.SetTop(cf.LocalBase + nargs + np) + for i := 0; i < np; i++ { + //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i)) + ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i] + //ls.reg.Set(cf.LocalBase+i, LNil) + ls.reg.array[cf.LocalBase+i] = LNil + } + + if CompatVarArg { + ls.reg.SetTop(cf.LocalBase + nargs + np + 1) + if (proto.IsVarArg & VarArgNeedsArg) != 0 { + argtb := newLTable(nvarargs, 0) + for i := 0; i < nvarargs; i++ { + argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i)) + } + argtb.RawSetString("n", LNumber(nvarargs)) + //ls.reg.Set(cf.LocalBase+nargs+np, argtb) + ls.reg.array[cf.LocalBase+nargs+np] = argtb + } else { 
+ ls.reg.array[cf.LocalBase+nargs+np] = LNil + } + } + cf.LocalBase += nargs + maxreg := cf.LocalBase + int(proto.NumUsedRegisters) + ls.reg.SetTop(maxreg) + } + } +} // +inline-end + +func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) { // +inline-start + if meta { + cf.NArgs++ + ls.reg.Insert(fn, cf.LocalBase) + } + if cf.Fn == nil { + ls.RaiseError("attempt to call a non-function object") + } + if ls.stack.sp == ls.Options.CallStackSize { + ls.RaiseError("stack overflow") + } + // +inline-call ls.stack.Push cf + newcf := ls.stack.Last() + // +inline-call ls.initCallFrame newcf + ls.currentFrame = newcf +} // +inline-end + +func (ls *LState) callR(nargs, nret, rbase int) { + base := ls.reg.Top() - nargs - 1 + if rbase < 0 { + rbase = base + } + lv := ls.reg.Get(base) + fn, meta := ls.metaCall(lv) + ls.pushCallFrame(callFrame{ + Fn: fn, + Pc: 0, + Base: base, + LocalBase: base + 1, + ReturnBase: rbase, + NArgs: nargs, + NRet: nret, + Parent: ls.currentFrame, + TailCall: 0, + }, lv, meta) + if ls.G.MainThread == nil { + ls.G.MainThread = ls + ls.G.CurrentThread = ls + ls.mainLoop(ls, nil) + } else { + ls.mainLoop(ls, ls.currentFrame) + } + if nret != MultRet { + ls.reg.SetTop(rbase + nret) + } +} + +func (ls *LState) getField(obj LValue, key LValue) LValue { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + ret := tb.RawGet(key) + if ret != LNil { + return ret + } + } + metaindex := ls.metaOp1(curobj, "__index") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + return LNil + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(key) + ls.Call(2, 1) + return ls.reg.Pop() + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in gettable") + return nil +} + +func (ls *LState) getFieldString(obj LValue, key string) LValue { + curobj := obj + for i 
:= 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + ret := tb.RawGetString(key) + if ret != LNil { + return ret + } + } + metaindex := ls.metaOp1(curobj, "__index") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + return LNil + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(LString(key)) + ls.Call(2, 1) + return ls.reg.Pop() + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in gettable") + return nil +} + +func (ls *LState) setField(obj LValue, key LValue, value LValue) { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + if tb.RawGet(key) != LNil { + ls.RawSet(tb, key, value) + return + } + } + metaindex := ls.metaOp1(curobj, "__newindex") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + ls.RawSet(tb, key, value) + return + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(key) + ls.reg.Push(value) + ls.Call(3, 0) + return + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in settable") +} + +func (ls *LState) setFieldString(obj LValue, key string, value LValue) { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + if tb.RawGetString(key) != LNil { + tb.RawSetString(key, value) + return + } + } + metaindex := ls.metaOp1(curobj, "__newindex") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + tb.RawSetString(key, value) + return + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(LString(key)) + ls.reg.Push(value) + ls.Call(3, 0) + return + } else { + curobj = metaindex + } + } + 
ls.RaiseError("too many recursions in settable") +} + +/* }}} */ + +/* api methods {{{ */ + +func NewState(opts ...Options) *LState { + var ls *LState + if len(opts) == 0 { + ls = newLState(Options{ + CallStackSize: CallStackSize, + RegistrySize: RegistrySize, + }) + ls.OpenLibs() + } else { + if opts[0].CallStackSize < 1 { + opts[0].CallStackSize = CallStackSize + } + if opts[0].RegistrySize < 128 { + opts[0].RegistrySize = RegistrySize + } + ls = newLState(opts[0]) + if !opts[0].SkipOpenLibs { + ls.OpenLibs() + } + } + return ls +} + +func (ls *LState) Close() { + atomic.AddInt32(&ls.stop, 1) + for _, file := range ls.G.tempFiles { + // ignore errors in these operations + file.Close() + os.Remove(file.Name()) + } +} + +/* registry operations {{{ */ + +func (ls *LState) GetTop() int { + return ls.reg.Top() - ls.currentLocalBase() +} + +func (ls *LState) SetTop(idx int) { + base := ls.currentLocalBase() + newtop := ls.indexToReg(idx) + 1 + if newtop < base { + ls.reg.SetTop(base) + } else { + ls.reg.SetTop(newtop) + } +} + +func (ls *LState) Replace(idx int, value LValue) { + base := ls.currentLocalBase() + if idx > 0 { + reg := base + idx - 1 + if reg < ls.reg.Top() { + ls.reg.Set(reg, value) + } + } else if idx == 0 { + } else if idx > RegistryIndex { + if tidx := ls.reg.Top() + idx; tidx >= base { + ls.reg.Set(tidx, value) + } + } else { + switch idx { + case RegistryIndex: + if tb, ok := value.(*LTable); ok { + ls.G.Registry = tb + } else { + ls.RaiseError("registry must be a table(%v)", value.Type().String()) + } + case EnvironIndex: + if ls.currentFrame == nil { + ls.RaiseError("no calling environment") + } + if tb, ok := value.(*LTable); ok { + ls.currentFrame.Fn.Env = tb + } else { + ls.RaiseError("environment must be a table(%v)", value.Type().String()) + } + case GlobalsIndex: + if tb, ok := value.(*LTable); ok { + ls.G.Global = tb + } else { + ls.RaiseError("_G must be a table(%v)", value.Type().String()) + } + default: + fn := ls.currentFrame.Fn + index 
:= GlobalsIndex - idx - 1 + if index < len(fn.Upvalues) { + fn.Upvalues[index].SetValue(value) + } + } + } +} + +func (ls *LState) Get(idx int) LValue { + base := ls.currentLocalBase() + if idx > 0 { + reg := base + idx - 1 + if reg < ls.reg.Top() { + return ls.reg.Get(reg) + } + return LNil + } else if idx == 0 { + return LNil + } else if idx > RegistryIndex { + tidx := ls.reg.Top() + idx + if tidx < base { + return LNil + } + return ls.reg.Get(tidx) + } else { + switch idx { + case RegistryIndex: + return ls.G.Registry + case EnvironIndex: + if ls.currentFrame == nil { + return ls.Env + } + return ls.currentFrame.Fn.Env + case GlobalsIndex: + return ls.G.Global + default: + fn := ls.currentFrame.Fn + index := GlobalsIndex - idx - 1 + if index < len(fn.Upvalues) { + return fn.Upvalues[index].Value() + } + return LNil + } + } + return LNil +} + +func (ls *LState) Push(value LValue) { + ls.reg.Push(value) +} + +func (ls *LState) Pop(n int) { + for i := 0; i < n; i++ { + if ls.GetTop() == 0 { + ls.RaiseError("register underflow") + } + ls.reg.Pop() + } +} + +func (ls *LState) Insert(value LValue, index int) { + reg := ls.indexToReg(index) + top := ls.reg.Top() + if reg >= top { + ls.reg.Set(reg, value) + return + } + if reg <= ls.currentLocalBase() { + reg = ls.currentLocalBase() + } + top-- + for ; top >= reg; top-- { + ls.reg.Set(top+1, ls.reg.Get(top)) + } + ls.reg.Set(reg, value) +} + +func (ls *LState) Remove(index int) { + reg := ls.indexToReg(index) + top := ls.reg.Top() + switch { + case reg >= top: + return + case reg < ls.currentLocalBase(): + return + case reg == top-1: + ls.Pop(1) + return + } + for i := reg; i < top-1; i++ { + ls.reg.Set(i, ls.reg.Get(i+1)) + } + ls.reg.SetTop(top - 1) +} + +/* }}} */ + +/* object allocation {{{ */ + +func (ls *LState) NewTable() *LTable { + return newLTable(defaultArrayCap, defaultHashCap) +} + +func (ls *LState) CreateTable(acap, hcap int) *LTable { + return newLTable(acap, hcap) +} + +// NewThread returns a new LState 
that shares with the original state all global objects. +// If the original state has context.Context, the new state has a new child context of the original state and this function returns its cancel function. +func (ls *LState) NewThread() (*LState, context.CancelFunc) { + thread := newLState(ls.Options) + thread.G = ls.G + thread.Env = ls.Env + var f context.CancelFunc = nil + if ls.ctx != nil { + thread.mainLoop = mainLoopWithContext + thread.ctx, f = context.WithCancel(ls.ctx) + } + return thread, f +} + +func (ls *LState) NewUserData() *LUserData { + return &LUserData{ + Env: ls.currentEnv(), + Metatable: LNil, + } +} + +func (ls *LState) NewFunction(fn LGFunction) *LFunction { + return newLFunctionG(fn, ls.currentEnv(), 0) +} + +func (ls *LState) NewClosure(fn LGFunction, upvalues ...LValue) *LFunction { + cl := newLFunctionG(fn, ls.currentEnv(), len(upvalues)) + for i, lv := range upvalues { + cl.Upvalues[i] = &Upvalue{} + cl.Upvalues[i].Close() + cl.Upvalues[i].SetValue(lv) + } + return cl +} + +/* }}} */ + +/* toType {{{ */ + +func (ls *LState) ToBool(n int) bool { + return LVAsBool(ls.Get(n)) +} + +func (ls *LState) ToInt(n int) int { + if lv, ok := ls.Get(n).(LNumber); ok { + return int(lv) + } + if lv, ok := ls.Get(n).(LString); ok { + if num, err := parseNumber(string(lv)); err == nil { + return int(num) + } + } + return 0 +} + +func (ls *LState) ToInt64(n int) int64 { + if lv, ok := ls.Get(n).(LNumber); ok { + return int64(lv) + } + if lv, ok := ls.Get(n).(LString); ok { + if num, err := parseNumber(string(lv)); err == nil { + return int64(num) + } + } + return 0 +} + +func (ls *LState) ToNumber(n int) LNumber { + return LVAsNumber(ls.Get(n)) +} + +func (ls *LState) ToString(n int) string { + return LVAsString(ls.Get(n)) +} + +func (ls *LState) ToTable(n int) *LTable { + if lv, ok := ls.Get(n).(*LTable); ok { + return lv + } + return nil +} + +func (ls *LState) ToFunction(n int) *LFunction { + if lv, ok := ls.Get(n).(*LFunction); ok { + return lv + } 
+ return nil +} + +func (ls *LState) ToUserData(n int) *LUserData { + if lv, ok := ls.Get(n).(*LUserData); ok { + return lv + } + return nil +} + +func (ls *LState) ToThread(n int) *LState { + if lv, ok := ls.Get(n).(*LState); ok { + return lv + } + return nil +} + +/* }}} */ + +/* error & debug operations {{{ */ + +// This function is equivalent to luaL_error( http://www.lua.org/manual/5.1/manual.html#luaL_error ). +func (ls *LState) RaiseError(format string, args ...interface{}) { + ls.raiseError(1, format, args...) +} + +// This function is equivalent to lua_error( http://www.lua.org/manual/5.1/manual.html#lua_error ). +func (ls *LState) Error(lv LValue, level int) { + if str, ok := lv.(LString); ok { + ls.raiseError(level, string(str)) + } else { + if !ls.hasErrorFunc { + ls.closeAllUpvalues() + } + ls.Push(lv) + ls.Panic(ls) + } +} + +func (ls *LState) GetInfo(what string, dbg *Debug, fn LValue) (LValue, error) { + if !strings.HasPrefix(what, ">") { + fn = dbg.frame.Fn + } else { + what = what[1:] + } + f, ok := fn.(*LFunction) + if !ok { + return LNil, newApiErrorS(ApiErrorRun, "can not get debug info(an object in not a function)") + } + + retfn := false + for _, c := range what { + switch c { + case 'f': + retfn = true + case 'S': + if dbg.frame != nil && dbg.frame.Parent == nil { + dbg.What = "main" + } else if f.IsG { + dbg.What = "G" + } else if dbg.frame != nil && dbg.frame.TailCall > 0 { + dbg.What = "tail" + } else { + dbg.What = "Lua" + } + if !f.IsG { + dbg.Source = f.Proto.SourceName + dbg.LineDefined = f.Proto.LineDefined + dbg.LastLineDefined = f.Proto.LastLineDefined + } + case 'l': + if !f.IsG && dbg.frame != nil { + if dbg.frame.Pc > 0 { + dbg.CurrentLine = f.Proto.DbgSourcePositions[dbg.frame.Pc-1] + } + } else { + dbg.CurrentLine = -1 + } + case 'u': + dbg.NUpvalues = len(f.Upvalues) + case 'n': + if dbg.frame != nil { + dbg.Name = ls.rawFrameFuncName(dbg.frame) + } + default: + return LNil, newApiErrorS(ApiErrorRun, "invalid what: 
"+string(c)) + } + } + + if retfn { + return f, nil + } + return LNil, nil + +} + +func (ls *LState) GetStack(level int) (*Debug, bool) { + frame := ls.currentFrame + for ; level > 0 && frame != nil; frame = frame.Parent { + level-- + if !frame.Fn.IsG { + level -= frame.TailCall + } + } + + if level == 0 && frame != nil { + return &Debug{frame: frame}, true + } else if level < 0 && ls.stack.Sp() > 0 { + return &Debug{frame: ls.stack.At(0)}, true + } + return &Debug{}, false +} + +func (ls *LState) GetLocal(dbg *Debug, no int) (string, LValue) { + frame := dbg.frame + if name := ls.findLocal(frame, no); len(name) > 0 { + return name, ls.reg.Get(frame.LocalBase + no - 1) + } + return "", LNil +} + +func (ls *LState) SetLocal(dbg *Debug, no int, lv LValue) string { + frame := dbg.frame + if name := ls.findLocal(frame, no); len(name) > 0 { + ls.reg.Set(frame.LocalBase+no-1, lv) + return name + } + return "" +} + +func (ls *LState) GetUpvalue(fn *LFunction, no int) (string, LValue) { + if fn.IsG { + return "", LNil + } + + no-- + if no >= 0 && no < len(fn.Upvalues) { + return fn.Proto.DbgUpvalues[no], fn.Upvalues[no].Value() + } + return "", LNil +} + +func (ls *LState) SetUpvalue(fn *LFunction, no int, lv LValue) string { + if fn.IsG { + return "" + } + + no-- + if no >= 0 && no < len(fn.Upvalues) { + fn.Upvalues[no].SetValue(lv) + return fn.Proto.DbgUpvalues[no] + } + return "" +} + +/* }}} */ + +/* env operations {{{ */ + +func (ls *LState) GetFEnv(obj LValue) LValue { + switch lv := obj.(type) { + case *LFunction: + return lv.Env + case *LUserData: + return lv.Env + case *LState: + return lv.Env + } + return LNil +} + +func (ls *LState) SetFEnv(obj LValue, env LValue) { + tb, ok := env.(*LTable) + if !ok { + ls.RaiseError("cannot use %v as an environment", env.Type().String()) + } + + switch lv := obj.(type) { + case *LFunction: + lv.Env = tb + case *LUserData: + lv.Env = tb + case *LState: + lv.Env = tb + } + /* do nothing */ +} + +/* }}} */ + +/* table operations 
{{{ */ + +func (ls *LState) RawGet(tb *LTable, key LValue) LValue { + return tb.RawGet(key) +} + +func (ls *LState) RawGetInt(tb *LTable, key int) LValue { + return tb.RawGetInt(key) +} + +func (ls *LState) GetField(obj LValue, skey string) LValue { + return ls.getFieldString(obj, skey) +} + +func (ls *LState) GetTable(obj LValue, key LValue) LValue { + return ls.getField(obj, key) +} + +func (ls *LState) RawSet(tb *LTable, key LValue, value LValue) { + if n, ok := key.(LNumber); ok && math.IsNaN(float64(n)) { + ls.RaiseError("table index is NaN") + } else if key == LNil { + ls.RaiseError("table index is nil") + } + tb.RawSet(key, value) +} + +func (ls *LState) RawSetInt(tb *LTable, key int, value LValue) { + tb.RawSetInt(key, value) +} + +func (ls *LState) SetField(obj LValue, key string, value LValue) { + ls.setFieldString(obj, key, value) +} + +func (ls *LState) SetTable(obj LValue, key LValue, value LValue) { + ls.setField(obj, key, value) +} + +func (ls *LState) ForEach(tb *LTable, cb func(LValue, LValue)) { + tb.ForEach(cb) +} + +func (ls *LState) GetGlobal(name string) LValue { + return ls.GetField(ls.Get(GlobalsIndex), name) +} + +func (ls *LState) SetGlobal(name string, value LValue) { + ls.SetField(ls.Get(GlobalsIndex), name, value) +} + +func (ls *LState) Next(tb *LTable, key LValue) (LValue, LValue) { + return tb.Next(key) +} + +/* }}} */ + +/* unary operations {{{ */ + +func (ls *LState) ObjLen(v1 LValue) int { + if v1.Type() == LTString { + return len(string(v1.(LString))) + } + op := ls.metaOp1(v1, "__len") + if op.Type() == LTFunction { + ls.Push(op) + ls.Push(v1) + ls.Call(1, 1) + ret := ls.reg.Pop() + if ret.Type() == LTNumber { + return int(ret.(LNumber)) + } + } else if v1.Type() == LTTable { + return v1.(*LTable).Len() + } + return 0 +} + +/* }}} */ + +/* binary operations {{{ */ + +func (ls *LState) Concat(values ...LValue) string { + top := ls.reg.Top() + for _, value := range values { + ls.reg.Push(value) + } + ret := stringConcat(ls, 
len(values), ls.reg.Top()-1) + ls.reg.SetTop(top) + return LVAsString(ret) +} + +func (ls *LState) LessThan(lhs, rhs LValue) bool { + return lessThan(ls, lhs, rhs) +} + +func (ls *LState) Equal(lhs, rhs LValue) bool { + return equals(ls, lhs, rhs, false) +} + +func (ls *LState) RawEqual(lhs, rhs LValue) bool { + return equals(ls, lhs, rhs, true) +} + +/* }}} */ + +/* register operations {{{ */ + +func (ls *LState) Register(name string, fn LGFunction) { + ls.SetGlobal(name, ls.NewFunction(fn)) +} + +/* }}} */ + +/* load and function call operations {{{ */ + +func (ls *LState) Load(reader io.Reader, name string) (*LFunction, error) { + chunk, err := parse.Parse(reader, name) + if err != nil { + return nil, newApiErrorE(ApiErrorSyntax, err) + } + proto, err := Compile(chunk, name) + if err != nil { + return nil, newApiErrorE(ApiErrorSyntax, err) + } + return newLFunctionL(proto, ls.currentEnv(), 0), nil +} + +func (ls *LState) Call(nargs, nret int) { + ls.callR(nargs, nret, -1) +} + +func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) { + err = nil + sp := ls.stack.Sp() + base := ls.reg.Top() - nargs - 1 + oldpanic := ls.Panic + ls.Panic = panicWithoutTraceback + if errfunc != nil { + ls.hasErrorFunc = true + } + defer func() { + ls.Panic = oldpanic + ls.hasErrorFunc = false + rcv := recover() + if rcv != nil { + if _, ok := rcv.(*ApiError); !ok { + err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv)) + if ls.Options.IncludeGoStackTrace { + buf := make([]byte, 4096) + runtime.Stack(buf, false) + err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + "\n" + ls.stackTrace(0) + } + } else { + err = rcv.(*ApiError) + } + if errfunc != nil { + ls.Push(errfunc) + ls.Push(err.(*ApiError).Object) + ls.Panic = panicWithoutTraceback + defer func() { + ls.Panic = oldpanic + rcv := recover() + if rcv != nil { + if _, ok := rcv.(*ApiError); !ok { + err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv)) + if ls.Options.IncludeGoStackTrace { + buf := 
make([]byte, 4096) + runtime.Stack(buf, false) + err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + ls.stackTrace(0) + } + } else { + err = rcv.(*ApiError) + err.(*ApiError).StackTrace = ls.stackTrace(0) + } + } + }() + ls.Call(1, 1) + err = newApiError(ApiErrorError, ls.Get(-1)) + } else if len(err.(*ApiError).StackTrace) == 0 { + err.(*ApiError).StackTrace = ls.stackTrace(0) + } + ls.reg.SetTop(base) + } + ls.stack.SetSp(sp) + if sp == 0 { + ls.currentFrame = nil + } + }() + + ls.Call(nargs, nret) + + return +} + +func (ls *LState) GPCall(fn LGFunction, data LValue) error { + ls.Push(newLFunctionG(fn, ls.currentEnv(), 0)) + ls.Push(data) + return ls.PCall(1, MultRet, nil) +} + +func (ls *LState) CallByParam(cp P, args ...LValue) error { + ls.Push(cp.Fn) + for _, arg := range args { + ls.Push(arg) + } + + if cp.Protect { + return ls.PCall(len(args), cp.NRet, cp.Handler) + } + ls.Call(len(args), cp.NRet) + return nil +} + +/* }}} */ + +/* metatable operations {{{ */ + +func (ls *LState) GetMetatable(obj LValue) LValue { + return ls.metatable(obj, false) +} + +func (ls *LState) SetMetatable(obj LValue, mt LValue) { + switch mt.(type) { + case *LNilType, *LTable: + default: + ls.RaiseError("metatable must be a table or nil, but got %v", mt.Type().String()) + } + + switch v := obj.(type) { + case *LTable: + v.Metatable = mt + case *LUserData: + v.Metatable = mt + default: + ls.G.builtinMts[int(obj.Type())] = mt + } +} + +/* }}} */ + +/* coroutine operations {{{ */ + +func (ls *LState) Status(th *LState) string { + status := "suspended" + if th.Dead { + status = "dead" + } else if ls.G.CurrentThread == th { + status = "running" + } else if ls.Parent == th { + status = "normal" + } + return status +} + +func (ls *LState) Resume(th *LState, fn *LFunction, args ...LValue) (ResumeState, error, []LValue) { + isstarted := th.isStarted() + if !isstarted { + base := 0 + th.stack.Push(callFrame{ + Fn: fn, + Pc: 0, + Base: base, + LocalBase: base + 1, + ReturnBase: 
base, + NArgs: 0, + NRet: MultRet, + Parent: nil, + TailCall: 0, + }) + } + + if ls.G.CurrentThread == th { + return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a running thread"), nil + } + if th.Dead { + return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a dead thread"), nil + } + th.Parent = ls + ls.G.CurrentThread = th + if !isstarted { + cf := th.stack.Last() + th.currentFrame = cf + th.SetTop(0) + for _, arg := range args { + th.Push(arg) + } + cf.NArgs = len(args) + th.initCallFrame(cf) + th.Panic = panicWithoutTraceback + } else { + for _, arg := range args { + th.Push(arg) + } + } + top := ls.GetTop() + threadRun(th) + haserror := LVIsFalse(ls.Get(top + 1)) + ret := make([]LValue, 0, ls.GetTop()) + for idx := top + 2; idx <= ls.GetTop(); idx++ { + ret = append(ret, ls.Get(idx)) + } + if len(ret) == 0 { + ret = append(ret, LNil) + } + ls.SetTop(top) + + if haserror { + return ResumeError, newApiError(ApiErrorRun, ret[0]), nil + } else if th.stack.IsEmpty() { + return ResumeOK, nil, ret + } + return ResumeYield, nil, ret +} + +func (ls *LState) Yield(values ...LValue) int { + ls.SetTop(0) + for _, lv := range values { + ls.Push(lv) + } + return -1 +} + +func (ls *LState) XMoveTo(other *LState, n int) { + if ls == other { + return + } + top := ls.GetTop() + n = intMin(n, top) + for i := n; i > 0; i-- { + other.Push(ls.Get(top - i + 1)) + } + ls.SetTop(top - n) +} + +/* }}} */ + +/* GopherLua original APIs {{{ */ + +// Set maximum memory size. This function can only be called from the main thread. +func (ls *LState) SetMx(mx int) { + if ls.Parent != nil { + ls.RaiseError("sub threads are not allowed to set a memory limit") + } + go func() { + limit := uint64(mx * 1024 * 1024) //MB + var s runtime.MemStats + for ls.stop == 0 { + runtime.ReadMemStats(&s) + if s.Alloc >= limit { + fmt.Println("out of memory") + os.Exit(3) + } + time.Sleep(100 * time.Millisecond) + } + }() +} + +// SetContext set a context ctx to this LState. 
The provided ctx must be non-nil. +func (ls *LState) SetContext(ctx context.Context) { + ls.mainLoop = mainLoopWithContext + ls.ctx = ctx +} + +// Context returns the LState's context. To change the context, use WithContext. +func (ls *LState) Context() context.Context { + return ls.ctx +} + +// RemoveContext removes the context associated with this LState and returns this context. +func (ls *LState) RemoveContext() context.Context { + oldctx := ls.ctx + ls.mainLoop = mainLoop + ls.ctx = nil + return oldctx +} + +// Converts the Lua value at the given acceptable index to the chan LValue. +func (ls *LState) ToChannel(n int) chan LValue { + if lv, ok := ls.Get(n).(LChannel); ok { + return (chan LValue)(lv) + } + return nil +} + +/* }}} */ + +/* }}} */ + +// diff --git a/vendor/github.com/yuin/gopher-lua/_vm.go b/vendor/github.com/yuin/gopher-lua/_vm.go new file mode 100644 index 00000000000..d5c06675fcc --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/_vm.go @@ -0,0 +1,1019 @@ +package lua + +import ( + "fmt" + "math" + "strings" +) + +func mainLoop(L *LState, baseframe *callFrame) { + var inst uint32 + var cf *callFrame + + if L.stack.IsEmpty() { + return + } + + L.currentFrame = L.stack.Last() + if L.currentFrame.Fn.IsG { + callGFunction(L, false) + return + } + + for { + cf = L.currentFrame + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 { + return + } + } +} + +func mainLoopWithContext(L *LState, baseframe *callFrame) { + var inst uint32 + var cf *callFrame + + if L.stack.IsEmpty() { + return + } + + L.currentFrame = L.stack.Last() + if L.currentFrame.Fn.IsG { + callGFunction(L, false) + return + } + + for { + cf = L.currentFrame + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + select { + case <-L.ctx.Done(): + L.RaiseError(L.ctx.Err().Error()) + return + default: + if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 { + return + } + } + } +} + +func copyReturnValues(L *LState, regv, start, n, b int) { // 
+inline-start + if b == 1 { + // +inline-call L.reg.FillNil regv n + } else { + // +inline-call L.reg.CopyRange regv start -1 n + } +} // +inline-end + +func switchToParentThread(L *LState, nargs int, haserror bool, kill bool) { + parent := L.Parent + if parent == nil { + L.RaiseError("can not yield from outside of a coroutine") + } + L.G.CurrentThread = parent + L.Parent = nil + if !L.wrapped { + if haserror { + parent.Push(LFalse) + } else { + parent.Push(LTrue) + } + } + L.XMoveTo(parent, nargs) + L.stack.Pop() + offset := L.currentFrame.LocalBase - L.currentFrame.ReturnBase + L.currentFrame = L.stack.Last() + L.reg.SetTop(L.reg.Top() - offset) // remove 'yield' function(including tailcalled functions) + if kill { + L.kill() + } +} + +func callGFunction(L *LState, tailcall bool) bool { + frame := L.currentFrame + gfnret := frame.Fn.GFunction(L) + if tailcall { + L.stack.Remove(L.stack.Sp() - 2) // remove caller lua function frame + L.currentFrame = L.stack.Last() + } + + if gfnret < 0 { + switchToParentThread(L, L.GetTop(), false, false) + return true + } + + wantret := frame.NRet + if wantret == MultRet { + wantret = gfnret + } + + if tailcall && L.Parent != nil && L.stack.Sp() == 1 { + switchToParentThread(L, wantret, false, true) + return true + } + + // +inline-call L.reg.CopyRange frame.ReturnBase L.reg.Top()-gfnret -1 wantret + L.stack.Pop() + L.currentFrame = L.stack.Last() + return false +} + +func threadRun(L *LState) { + if L.stack.IsEmpty() { + return + } + + defer func() { + if rcv := recover(); rcv != nil { + var lv LValue + if v, ok := rcv.(*ApiError); ok { + lv = v.Object + } else { + lv = LString(fmt.Sprint(rcv)) + } + if parent := L.Parent; parent != nil { + if L.wrapped { + L.Push(lv) + parent.Panic(L) + } else { + L.SetTop(0) + L.Push(lv) + switchToParentThread(L, 1, true, true) + } + } else { + panic(rcv) + } + } + }() + L.mainLoop(L, nil) +} + +type instFunc func(*LState, uint32, *callFrame) int + +var jumpTable [opCodeMax + 1]instFunc + 
+func init() { + jumpTable = [opCodeMax + 1]instFunc{ + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + reg.Set(RA, reg.Get(lbase+B)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(lbase+A, reg.Get(lbase+B)) + code := cf.Fn.Proto.Code + pc := cf.Pc + for i := 0; i < C; i++ { + inst = code[pc] + pc++ + A = int(inst>>18) & 0xff //GETA + B = int(inst & 0x1ff) //GETB + reg.Set(lbase+A, reg.Get(lbase+B)) + } + cf.Pc = pc + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADK + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + reg.Set(RA, cf.Fn.Proto.Constants[Bx]) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if B != 0 { + reg.Set(RA, LTrue) + } else { + reg.Set(RA, LFalse) + } + if C != 0 { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADNIL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + for i := RA; i <= lbase+B; i++ { + reg.Set(i, LNil) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETUPVAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + reg.Set(RA, 
cf.Fn.Upvalues[B].Value()) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx])) + reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx])) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C))) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C))) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + //L.setField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx], reg.Get(RA)) + L.setFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx], reg.Get(RA)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETUPVAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + cf.Fn.Upvalues[B].SetValue(reg.Get(RA)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 
0x1ff //GETC + L.setField(reg.Get(RA), L.rkValue(B), L.rkValue(C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLEKS + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + L.setFieldString(reg.Get(RA), L.rkString(B), L.rkValue(C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NEWTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, newLTable(B, C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + selfobj := reg.Get(lbase + B) + reg.Set(RA, L.getFieldString(selfobj, L.rkString(C))) + reg.Set(RA+1, selfobj) + return 0 + }, + opArith, // OP_ADD + opArith, // OP_SUB + opArith, // OP_MUL + opArith, // OP_DIV + opArith, // OP_MOD + opArith, // OP_POW + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_UNM + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + unaryv := L.rkValue(B) + if nm, ok := unaryv.(LNumber); ok { + reg.SetNumber(RA, -nm) + } else { + op := L.metaOp1(unaryv, "__unm") + if op.Type() == LTFunction { + reg.Push(op) + reg.Push(unaryv) + L.Call(1, 1) + reg.Set(RA, reg.Pop()) + } else if str, ok1 := unaryv.(LString); ok1 { + if num, err := parseNumber(string(str)); err == nil { + reg.Set(RA, -num) + } else { + L.RaiseError("__unm undefined") + } + } else { + L.RaiseError("__unm undefined") + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOT + reg := 
L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + if LVIsFalse(reg.Get(lbase + B)) { + reg.Set(RA, LTrue) + } else { + reg.Set(RA, LFalse) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LEN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + switch lv := L.rkValue(B).(type) { + case LString: + reg.SetNumber(RA, LNumber(len(lv))) + default: + op := L.metaOp1(lv, "__len") + if op.Type() == LTFunction { + reg.Push(op) + reg.Push(lv) + L.Call(1, 1) + reg.Set(RA, reg.Pop()) + } else if lv.Type() == LTTable { + reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) + } else { + L.RaiseError("__len undefined") + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CONCAT + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + RC := lbase + C + RB := lbase + B + reg.Set(RA, stringConcat(L, RC-RB+1, RC)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP + cf := L.currentFrame + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + cf.Pc += Sbx + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_EQ + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + ret := equals(L, L.rkValue(B), L.rkValue(C), false) + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LT + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + ret := lessThan(L, L.rkValue(B), L.rkValue(C)) + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + 
return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LE + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + lhs := L.rkValue(B) + rhs := L.rkValue(C) + ret := false + + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + ret = v1 <= v2 + } else { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + } + } else { + if lhs.Type() != rhs.Type() { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + } + switch lhs.Type() { + case LTString: + ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) <= 0 + default: + switch objectRational(L, lhs, rhs, "__le") { + case 1: + ret = true + case 0: + ret = false + default: + ret = !objectRationalWithError(L, rhs, lhs, "__lt") + } + } + } + + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TEST + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + C := int(inst>>9) & 0x1ff //GETC + if LVAsBool(reg.Get(RA)) == (C == 0) { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TESTSET + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) { + reg.Set(RA, value) + } else { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CALL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + nargs := B - 1 + if B == 0 { + nargs = reg.Top() - (RA + 1) + } + lv := reg.Get(RA) + nret := C - 1 
+ var callable *LFunction + var meta bool + if fn, ok := lv.assertFunction(); ok { + callable = fn + meta = false + } else { + callable, meta = L.metaCall(lv) + } + // +inline-call L.pushCallFrame callFrame{Fn:callable,Pc:0,Base:RA,LocalBase:RA+1,ReturnBase:RA,NArgs:nargs,NRet:nret,Parent:cf,TailCall:0} lv meta + if callable.IsG && callGFunction(L, false) { + return 1 + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TAILCALL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + nargs := B - 1 + if B == 0 { + nargs = reg.Top() - (RA + 1) + } + lv := reg.Get(RA) + var callable *LFunction + var meta bool + if fn, ok := lv.assertFunction(); ok { + callable = fn + meta = false + } else { + callable, meta = L.metaCall(lv) + } + if callable == nil { + L.RaiseError("attempt to call a non-function object") + } + // +inline-call L.closeUpvalues lbase + if callable.IsG { + luaframe := cf + L.pushCallFrame(callFrame{ + Fn: callable, + Pc: 0, + Base: RA, + LocalBase: RA + 1, + ReturnBase: cf.ReturnBase, + NArgs: nargs, + NRet: cf.NRet, + Parent: cf, + TailCall: 0, + }, lv, meta) + if callGFunction(L, true) { + return 1 + } + if L.currentFrame == nil || L.currentFrame.Fn.IsG || luaframe == baseframe { + return 1 + } + } else { + base := cf.Base + cf.Fn = callable + cf.Pc = 0 + cf.Base = RA + cf.LocalBase = RA + 1 + cf.ReturnBase = cf.ReturnBase + cf.NArgs = nargs + cf.NRet = cf.NRet + cf.TailCall++ + lbase := cf.LocalBase + if meta { + cf.NArgs++ + L.reg.Insert(lv, cf.LocalBase) + } + // +inline-call L.initCallFrame cf + // +inline-call L.reg.CopyRange base RA -1 reg.Top()-RA-1 + cf.Base = base + cf.LocalBase = base + (cf.LocalBase - lbase + 1) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_RETURN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B 
:= int(inst & 0x1ff) //GETB + // +inline-call L.closeUpvalues lbase + nret := B - 1 + if B == 0 { + nret = reg.Top() - RA + } + n := cf.NRet + if cf.NRet == MultRet { + n = nret + } + + if L.Parent != nil && L.stack.Sp() == 1 { + // +inline-call copyReturnValues L reg.Top() RA n B + switchToParentThread(L, n, false, true) + return 1 + } + islast := baseframe == L.stack.Pop() || L.stack.IsEmpty() + // +inline-call copyReturnValues L cf.ReturnBase RA n B + L.currentFrame = L.stack.Last() + if islast || L.currentFrame == nil || L.currentFrame.Fn.IsG { + return 1 + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORLOOP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { + if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 { + if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 { + init += step + reg.SetNumber(RA, LNumber(init)) + if (step > 0 && init <= limit) || (step <= 0 && init >= limit) { + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + cf.Pc += Sbx + reg.SetNumber(RA+3, LNumber(init)) + } else { + reg.SetTop(RA + 1) + } + } else { + L.RaiseError("for statement step must be a number") + } + } else { + L.RaiseError("for statement limit must be a number") + } + } else { + L.RaiseError("for statement init must be a number") + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORPREP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { + if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 { + reg.SetNumber(RA, LNumber(init-step)) + } else { + L.RaiseError("for statement step must be a number") + } + } else { + L.RaiseError("for statement init must be a number") + } + cf.Pc += Sbx + return 0 + }, + func(L *LState, inst 
uint32, baseframe *callFrame) int { //OP_TFORLOOP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + C := int(inst>>9) & 0x1ff //GETC + nret := C + reg.SetTop(RA + 3 + 2) + reg.Set(RA+3+2, reg.Get(RA+2)) + reg.Set(RA+3+1, reg.Get(RA+1)) + reg.Set(RA+3, reg.Get(RA)) + L.callR(2, nret, RA+3) + if value := reg.Get(RA + 3); value != LNil { + reg.Set(RA+2, value) + pc := cf.Fn.Proto.Code[cf.Pc] + cf.Pc += int(pc&0x3ffff) - opMaxArgSbx + } + cf.Pc++ + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETLIST + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if C == 0 { + C = int(cf.Fn.Proto.Code[cf.Pc]) + cf.Pc++ + } + offset := (C - 1) * FieldsPerFlush + table := reg.Get(RA).(*LTable) + nelem := B + if B == 0 { + nelem = reg.Top() - RA - 1 + } + for i := 1; i <= nelem; i++ { + table.RawSetInt(offset+i, reg.Get(RA+i)) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSE + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + // +inline-call L.closeUpvalues RA + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSURE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + proto := cf.Fn.Proto.FunctionPrototypes[Bx] + closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues)) + reg.Set(RA, closure) + for i := 0; i < int(proto.NumUpvalues); i++ { + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + B := opGetArgB(inst) + switch opGetOpCode(inst) { + case OP_MOVE: + closure.Upvalues[i] = L.findUpvalue(lbase + B) + case OP_GETUPVAL: + closure.Upvalues[i] = cf.Fn.Upvalues[B] + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe 
*callFrame) int { //OP_VARARG + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + nparams := int(cf.Fn.Proto.NumParameters) + nvarargs := cf.NArgs - nparams + if nvarargs < 0 { + nvarargs = 0 + } + nwant := B - 1 + if B == 0 { + nwant = nvarargs + } + // +inline-call reg.CopyRange RA cf.Base+nparams+1 cf.LocalBase nwant + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOP + return 0 + }, + } +} + +func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_POW + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + opcode := int(inst >> 26) //GETOPCODE + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + lhs := L.rkValue(B) + rhs := L.rkValue(C) + v1, ok1 := lhs.assertFloat64() + v2, ok2 := rhs.assertFloat64() + if ok1 && ok2 { + reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2))) + } else { + reg.Set(RA, objectArith(L, opcode, lhs, rhs)) + } + return 0 +} + +func luaModulo(lhs, rhs LNumber) LNumber { + flhs := float64(lhs) + frhs := float64(rhs) + v := math.Mod(flhs, frhs) + if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) { + v += frhs + } + return LNumber(v) +} + +func numberArith(L *LState, opcode int, lhs, rhs LNumber) LNumber { + switch opcode { + case OP_ADD: + return lhs + rhs + case OP_SUB: + return lhs - rhs + case OP_MUL: + return lhs * rhs + case OP_DIV: + return lhs / rhs + case OP_MOD: + return luaModulo(lhs, rhs) + case OP_POW: + flhs := float64(lhs) + frhs := float64(rhs) + return LNumber(math.Pow(flhs, frhs)) + } + panic("should not reach here") + return LNumber(0) +} + +func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { + event := "" + switch opcode { + case OP_ADD: + event = "__add" + case OP_SUB: + event = "__sub" + case OP_MUL: + event = "__mul" + case OP_DIV: + 
event = "__div" + case OP_MOD: + event = "__mod" + case OP_POW: + event = "__pow" + } + op := L.metaOp2(lhs, rhs, event) + if op.Type() == LTFunction { + L.reg.Push(op) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + return L.reg.Pop() + } + if str, ok := lhs.(LString); ok { + if lnum, err := parseNumber(string(str)); err == nil { + lhs = lnum + } + } + if str, ok := rhs.(LString); ok { + if rnum, err := parseNumber(string(str)); err == nil { + rhs = rnum + } + } + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + return numberArith(L, opcode, LNumber(v1), LNumber(v2)) + } + } + L.RaiseError(fmt.Sprintf("cannot perform %v operation between %v and %v", + strings.TrimLeft(event, "_"), lhs.Type().String(), rhs.Type().String())) + + return LNil +} + +func stringConcat(L *LState, total, last int) LValue { + rhs := L.reg.Get(last) + total-- + for i := last - 1; total > 0; { + lhs := L.reg.Get(i) + if !(LVCanConvToString(lhs) && LVCanConvToString(rhs)) { + op := L.metaOp2(lhs, rhs, "__concat") + if op.Type() == LTFunction { + L.reg.Push(op) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + rhs = L.reg.Pop() + total-- + i-- + } else { + L.RaiseError("cannot perform concat operation between %v and %v", lhs.Type().String(), rhs.Type().String()) + return LNil + } + } else { + buf := make([]string, total+1) + buf[total] = LVAsString(rhs) + for total > 0 { + lhs = L.reg.Get(i) + if !LVCanConvToString(lhs) { + break + } + buf[total-1] = LVAsString(lhs) + i-- + total-- + } + rhs = LString(strings.Join(buf, "")) + } + } + return rhs +} + +func lessThan(L *LState, lhs, rhs LValue) bool { + // optimization for numbers + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + return v1 < v2 + } + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + } + if lhs.Type() != rhs.Type() { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + 
return false + } + ret := false + switch lhs.Type() { + case LTString: + ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) < 0 + default: + ret = objectRationalWithError(L, lhs, rhs, "__lt") + } + return ret +} + +func equals(L *LState, lhs, rhs LValue, raw bool) bool { + if lhs.Type() != rhs.Type() { + return false + } + + ret := false + switch lhs.Type() { + case LTNil: + ret = true + case LTNumber: + v1, _ := lhs.assertFloat64() + v2, _ := rhs.assertFloat64() + ret = v1 == v2 + case LTBool: + ret = bool(lhs.(LBool)) == bool(rhs.(LBool)) + case LTString: + ret = string(lhs.(LString)) == string(rhs.(LString)) + case LTUserData, LTTable: + if lhs == rhs { + ret = true + } else if !raw { + switch objectRational(L, lhs, rhs, "__eq") { + case 1: + ret = true + default: + ret = false + } + } + default: + ret = lhs == rhs + } + return ret +} + +func objectRationalWithError(L *LState, lhs, rhs LValue, event string) bool { + switch objectRational(L, lhs, rhs, event) { + case 1: + return true + case 0: + return false + } + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + return false +} + +func objectRational(L *LState, lhs, rhs LValue, event string) int { + m1 := L.metaOp1(lhs, event) + m2 := L.metaOp1(rhs, event) + if m1.Type() == LTFunction && m1 == m2 { + L.reg.Push(m1) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + if LVAsBool(L.reg.Pop()) { + return 1 + } + return 0 + } + return -1 +} diff --git a/vendor/github.com/yuin/gopher-lua/alloc.go b/vendor/github.com/yuin/gopher-lua/alloc.go new file mode 100644 index 00000000000..4db466090ae --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/alloc.go @@ -0,0 +1,73 @@ +package lua + +import ( + "reflect" + "unsafe" +) + +// iface is an internal representation of the go-interface. 
+type iface struct { + itab unsafe.Pointer + word unsafe.Pointer +} + +const preloadLimit LNumber = 128 + +var _fv float64 +var _uv uintptr + +// allocator is a fast bulk memory allocator for the LValue. +type allocator struct { + top int + size int + nptrs []LValue + nheader *reflect.SliceHeader + fptrs []float64 + fheader *reflect.SliceHeader + itabLNumber unsafe.Pointer + preloads [int(preloadLimit)]LValue +} + +func newAllocator(size int) *allocator { + al := &allocator{ + top: 0, + size: size, + nptrs: make([]LValue, size), + nheader: nil, + fptrs: make([]float64, size), + fheader: nil, + itabLNumber: unsafe.Pointer(nil), + } + al.nheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.nptrs)) + al.fheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.fptrs)) + + var v LValue = LNumber(0) + vp := (*iface)(unsafe.Pointer(&v)) + al.itabLNumber = vp.itab + for i := 0; i < int(preloadLimit); i++ { + al.preloads[i] = LNumber(i) + } + return al +} + +func (al *allocator) LNumber2I(v LNumber) LValue { + if v >= 0 && v < preloadLimit && float64(v) == float64(int64(v)) { + return al.preloads[int(v)] + } + if al.top == len(al.nptrs)-1 { + al.top = 0 + al.nptrs = make([]LValue, al.size) + al.nheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.nptrs)) + al.fptrs = make([]float64, al.size) + al.fheader = (*reflect.SliceHeader)(unsafe.Pointer(&al.fptrs)) + } + fptr := (*float64)(unsafe.Pointer(al.fheader.Data + uintptr(al.top)*unsafe.Sizeof(_fv))) + e := *(*LValue)(unsafe.Pointer(al.nheader.Data + uintptr(al.top)*unsafe.Sizeof(_uv))) + al.top++ + + ep := (*iface)(unsafe.Pointer(&e)) + ep.itab = al.itabLNumber + *fptr = float64(v) + ep.word = unsafe.Pointer(fptr) + return e +} diff --git a/vendor/github.com/yuin/gopher-lua/ast/ast.go b/vendor/github.com/yuin/gopher-lua/ast/ast.go new file mode 100644 index 00000000000..f337a294732 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/ast/ast.go @@ -0,0 +1,29 @@ +package ast + +type PositionHolder interface { + Line() int + 
SetLine(int) + LastLine() int + SetLastLine(int) +} + +type Node struct { + line int + lastline int +} + +func (self *Node) Line() int { + return self.line +} + +func (self *Node) SetLine(line int) { + self.line = line +} + +func (self *Node) LastLine() int { + return self.lastline +} + +func (self *Node) SetLastLine(line int) { + self.lastline = line +} diff --git a/vendor/github.com/yuin/gopher-lua/ast/expr.go b/vendor/github.com/yuin/gopher-lua/ast/expr.go new file mode 100644 index 00000000000..ccda3279101 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/ast/expr.go @@ -0,0 +1,137 @@ +package ast + +type Expr interface { + PositionHolder + exprMarker() +} + +type ExprBase struct { + Node +} + +func (expr *ExprBase) exprMarker() {} + +/* ConstExprs {{{ */ + +type ConstExpr interface { + Expr + constExprMarker() +} + +type ConstExprBase struct { + ExprBase +} + +func (expr *ConstExprBase) constExprMarker() {} + +type TrueExpr struct { + ConstExprBase +} + +type FalseExpr struct { + ConstExprBase +} + +type NilExpr struct { + ConstExprBase +} + +type NumberExpr struct { + ConstExprBase + + Value string +} + +type StringExpr struct { + ConstExprBase + + Value string +} + +/* ConstExprs }}} */ + +type Comma3Expr struct { + ExprBase +} + +type IdentExpr struct { + ExprBase + + Value string +} + +type AttrGetExpr struct { + ExprBase + + Object Expr + Key Expr +} + +type TableExpr struct { + ExprBase + + Fields []*Field +} + +type FuncCallExpr struct { + ExprBase + + Func Expr + Receiver Expr + Method string + Args []Expr + AdjustRet bool +} + +type LogicalOpExpr struct { + ExprBase + + Operator string + Lhs Expr + Rhs Expr +} + +type RelationalOpExpr struct { + ExprBase + + Operator string + Lhs Expr + Rhs Expr +} + +type StringConcatOpExpr struct { + ExprBase + + Lhs Expr + Rhs Expr +} + +type ArithmeticOpExpr struct { + ExprBase + + Operator string + Lhs Expr + Rhs Expr +} + +type UnaryMinusOpExpr struct { + ExprBase + Expr Expr +} + +type UnaryNotOpExpr struct 
{ + ExprBase + Expr Expr +} + +type UnaryLenOpExpr struct { + ExprBase + Expr Expr +} + +type FunctionExpr struct { + ExprBase + + ParList *ParList + Stmts []Stmt +} diff --git a/vendor/github.com/yuin/gopher-lua/ast/misc.go b/vendor/github.com/yuin/gopher-lua/ast/misc.go new file mode 100644 index 00000000000..d811c042aa0 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/ast/misc.go @@ -0,0 +1,17 @@ +package ast + +type Field struct { + Key Expr + Value Expr +} + +type ParList struct { + HasVargs bool + Names []string +} + +type FuncName struct { + Func Expr + Receiver Expr + Method string +} diff --git a/vendor/github.com/yuin/gopher-lua/ast/stmt.go b/vendor/github.com/yuin/gopher-lua/ast/stmt.go new file mode 100644 index 00000000000..56ea6d1a23a --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/ast/stmt.go @@ -0,0 +1,95 @@ +package ast + +type Stmt interface { + PositionHolder + stmtMarker() +} + +type StmtBase struct { + Node +} + +func (stmt *StmtBase) stmtMarker() {} + +type AssignStmt struct { + StmtBase + + Lhs []Expr + Rhs []Expr +} + +type LocalAssignStmt struct { + StmtBase + + Names []string + Exprs []Expr +} + +type FuncCallStmt struct { + StmtBase + + Expr Expr +} + +type DoBlockStmt struct { + StmtBase + + Stmts []Stmt +} + +type WhileStmt struct { + StmtBase + + Condition Expr + Stmts []Stmt +} + +type RepeatStmt struct { + StmtBase + + Condition Expr + Stmts []Stmt +} + +type IfStmt struct { + StmtBase + + Condition Expr + Then []Stmt + Else []Stmt +} + +type NumberForStmt struct { + StmtBase + + Name string + Init Expr + Limit Expr + Step Expr + Stmts []Stmt +} + +type GenericForStmt struct { + StmtBase + + Names []string + Exprs []Expr + Stmts []Stmt +} + +type FuncDefStmt struct { + StmtBase + + Name *FuncName + Func *FunctionExpr +} + +type ReturnStmt struct { + StmtBase + + Exprs []Expr +} + +type BreakStmt struct { + StmtBase +} diff --git a/vendor/github.com/yuin/gopher-lua/ast/token.go 
b/vendor/github.com/yuin/gopher-lua/ast/token.go new file mode 100644 index 00000000000..820467c9a86 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/ast/token.go @@ -0,0 +1,22 @@ +package ast + +import ( + "fmt" +) + +type Position struct { + Source string + Line int + Column int +} + +type Token struct { + Type int + Name string + Str string + Pos Position +} + +func (self *Token) String() string { + return fmt.Sprintf("", self.Name, self.Str) +} diff --git a/vendor/github.com/yuin/gopher-lua/auxlib.go b/vendor/github.com/yuin/gopher-lua/auxlib.go new file mode 100644 index 00000000000..15199390cc5 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/auxlib.go @@ -0,0 +1,458 @@ +package lua + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +/* checkType {{{ */ + +func (ls *LState) CheckAny(n int) LValue { + if n > ls.GetTop() { + ls.ArgError(n, "value expected") + } + return ls.Get(n) +} + +func (ls *LState) CheckInt(n int) int { + v := ls.Get(n) + if intv, ok := v.(LNumber); ok { + return int(intv) + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) CheckInt64(n int) int64 { + v := ls.Get(n) + if intv, ok := v.(LNumber); ok { + return int64(intv) + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) CheckNumber(n int) LNumber { + v := ls.Get(n) + if lv, ok := v.(LNumber); ok { + return lv + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) CheckString(n int) string { + v := ls.Get(n) + if lv, ok := v.(LString); ok { + return string(lv) + } + ls.TypeError(n, LTString) + return "" +} + +func (ls *LState) CheckBool(n int) bool { + v := ls.Get(n) + if lv, ok := v.(LBool); ok { + return bool(lv) + } + ls.TypeError(n, LTBool) + return false +} + +func (ls *LState) CheckTable(n int) *LTable { + v := ls.Get(n) + if lv, ok := v.(*LTable); ok { + return lv + } + ls.TypeError(n, LTTable) + return nil +} + +func (ls *LState) CheckFunction(n int) *LFunction { + v := ls.Get(n) + if lv, ok := v.(*LFunction); ok { + 
return lv + } + ls.TypeError(n, LTFunction) + return nil +} + +func (ls *LState) CheckUserData(n int) *LUserData { + v := ls.Get(n) + if lv, ok := v.(*LUserData); ok { + return lv + } + ls.TypeError(n, LTUserData) + return nil +} + +func (ls *LState) CheckThread(n int) *LState { + v := ls.Get(n) + if lv, ok := v.(*LState); ok { + return lv + } + ls.TypeError(n, LTThread) + return nil +} + +func (ls *LState) CheckType(n int, typ LValueType) { + v := ls.Get(n) + if v.Type() != typ { + ls.TypeError(n, typ) + } +} + +func (ls *LState) CheckTypes(n int, typs ...LValueType) { + vt := ls.Get(n).Type() + for _, typ := range typs { + if vt == typ { + return + } + } + buf := []string{} + for _, typ := range typs { + buf = append(buf, typ.String()) + } + ls.ArgError(n, strings.Join(buf, " or ")+" expected, got "+ls.Get(n).Type().String()) +} + +func (ls *LState) CheckOption(n int, options []string) int { + str := ls.CheckString(n) + for i, v := range options { + if v == str { + return i + } + } + ls.ArgError(n, fmt.Sprintf("invalid option: %s (must be one of %s)", str, strings.Join(options, ","))) + return 0 +} + +/* }}} */ + +/* optType {{{ */ + +func (ls *LState) OptInt(n int, d int) int { + v := ls.Get(n) + if v == LNil { + return d + } + if intv, ok := v.(LNumber); ok { + return int(intv) + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) OptInt64(n int, d int64) int64 { + v := ls.Get(n) + if v == LNil { + return d + } + if intv, ok := v.(LNumber); ok { + return int64(intv) + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) OptNumber(n int, d LNumber) LNumber { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(LNumber); ok { + return lv + } + ls.TypeError(n, LTNumber) + return 0 +} + +func (ls *LState) OptString(n int, d string) string { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(LString); ok { + return string(lv) + } + ls.TypeError(n, LTString) + return "" +} + +func (ls *LState) OptBool(n int, d 
bool) bool { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(LBool); ok { + return bool(lv) + } + ls.TypeError(n, LTBool) + return false +} + +func (ls *LState) OptTable(n int, d *LTable) *LTable { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(*LTable); ok { + return lv + } + ls.TypeError(n, LTTable) + return nil +} + +func (ls *LState) OptFunction(n int, d *LFunction) *LFunction { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(*LFunction); ok { + return lv + } + ls.TypeError(n, LTFunction) + return nil +} + +func (ls *LState) OptUserData(n int, d *LUserData) *LUserData { + v := ls.Get(n) + if v == LNil { + return d + } + if lv, ok := v.(*LUserData); ok { + return lv + } + ls.TypeError(n, LTUserData) + return nil +} + +/* }}} */ + +/* error operations {{{ */ + +func (ls *LState) ArgError(n int, message string) { + ls.RaiseError("bad argument #%v to %v (%v)", n, ls.rawFrameFuncName(ls.currentFrame), message) +} + +func (ls *LState) TypeError(n int, typ LValueType) { + ls.RaiseError("bad argument #%v to %v (%v expected, got %v)", n, ls.rawFrameFuncName(ls.currentFrame), typ.String(), ls.Get(n).Type().String()) +} + +/* }}} */ + +/* debug operations {{{ */ + +func (ls *LState) Where(level int) string { + return ls.where(level, false) +} + +/* }}} */ + +/* table operations {{{ */ + +func (ls *LState) FindTable(obj *LTable, n string, size int) LValue { + names := strings.Split(n, ".") + curobj := obj + for _, name := range names { + if curobj.Type() != LTTable { + return LNil + } + nextobj := ls.RawGet(curobj, LString(name)) + if nextobj == LNil { + tb := ls.CreateTable(0, size) + ls.RawSet(curobj, LString(name), tb) + curobj = tb + } else if nextobj.Type() != LTTable { + return LNil + } else { + curobj = nextobj.(*LTable) + } + } + return curobj +} + +/* }}} */ + +/* register operations {{{ */ + +func (ls *LState) RegisterModule(name string, funcs map[string]LGFunction) LValue { + tb := 
ls.FindTable(ls.Get(RegistryIndex).(*LTable), "_LOADED", 1) + mod := ls.GetField(tb, name) + if mod.Type() != LTTable { + newmod := ls.FindTable(ls.Get(GlobalsIndex).(*LTable), name, len(funcs)) + if newmodtb, ok := newmod.(*LTable); !ok { + ls.RaiseError("name conflict for module(%v)", name) + } else { + for fname, fn := range funcs { + newmodtb.RawSetString(fname, ls.NewFunction(fn)) + } + ls.SetField(tb, name, newmodtb) + return newmodtb + } + } + return mod +} + +func (ls *LState) SetFuncs(tb *LTable, funcs map[string]LGFunction, upvalues ...LValue) *LTable { + for fname, fn := range funcs { + tb.RawSetString(fname, ls.NewClosure(fn, upvalues...)) + } + return tb +} + +/* }}} */ + +/* metatable operations {{{ */ + +func (ls *LState) NewTypeMetatable(typ string) *LTable { + regtable := ls.Get(RegistryIndex) + mt := ls.GetField(regtable, typ) + if tb, ok := mt.(*LTable); ok { + return tb + } + mtnew := ls.NewTable() + ls.SetField(regtable, typ, mtnew) + return mtnew +} + +func (ls *LState) GetMetaField(obj LValue, event string) LValue { + return ls.metaOp1(obj, event) +} + +func (ls *LState) GetTypeMetatable(typ string) LValue { + return ls.GetField(ls.Get(RegistryIndex), typ) +} + +func (ls *LState) CallMeta(obj LValue, event string) LValue { + op := ls.metaOp1(obj, event) + if op.Type() == LTFunction { + ls.reg.Push(op) + ls.reg.Push(obj) + ls.Call(1, 1) + return ls.reg.Pop() + } + return LNil +} + +/* }}} */ + +/* load and function call operations {{{ */ + +func (ls *LState) LoadFile(path string) (*LFunction, error) { + var file *os.File + var err error + if len(path) == 0 { + file = os.Stdin + } else { + file, err = os.Open(path) + defer file.Close() + if err != nil { + return nil, newApiErrorE(ApiErrorFile, err) + } + } + + reader := bufio.NewReader(file) + // get the first character. + c, err := reader.ReadByte() + if err != nil && err != io.EOF { + return nil, newApiErrorE(ApiErrorFile, err) + } + if c == byte('#') { + // Unix exec. file? 
+ // skip first line + _, err, _ = readBufioLine(reader) + if err != nil { + return nil, newApiErrorE(ApiErrorFile, err) + } + } + + if err != io.EOF { + // if the file is not empty, + // unread the first character of the file or newline character(readBufioLine's last byte). + err = reader.UnreadByte() + if err != nil { + return nil, newApiErrorE(ApiErrorFile, err) + } + } + + return ls.Load(reader, path) +} + +func (ls *LState) LoadString(source string) (*LFunction, error) { + return ls.Load(strings.NewReader(source), "") +} + +func (ls *LState) DoFile(path string) error { + if fn, err := ls.LoadFile(path); err != nil { + return err + } else { + ls.Push(fn) + return ls.PCall(0, MultRet, nil) + } +} + +func (ls *LState) DoString(source string) error { + if fn, err := ls.LoadString(source); err != nil { + return err + } else { + ls.Push(fn) + return ls.PCall(0, MultRet, nil) + } +} + +/* }}} */ + +/* GopherLua original APIs {{{ */ + +// ToStringMeta returns string representation of given LValue. +// This method calls the `__tostring` meta method if defined. +func (ls *LState) ToStringMeta(lv LValue) LValue { + if fn, ok := ls.metaOp1(lv, "__tostring").assertFunction(); ok { + ls.Push(fn) + ls.Push(lv) + ls.Call(1, 1) + return ls.reg.Pop() + } else { + return LString(lv.String()) + } +} + +// Set a module loader to the package.preload table. +func (ls *LState) PreloadModule(name string, loader LGFunction) { + preload := ls.GetField(ls.GetField(ls.Get(EnvironIndex), "package"), "preload") + if _, ok := preload.(*LTable); !ok { + ls.RaiseError("package.preload must be a table") + } + ls.SetField(preload, name, ls.NewFunction(loader)) +} + +// Checks whether the given index is an LChannel and returns this channel. +func (ls *LState) CheckChannel(n int) chan LValue { + v := ls.Get(n) + if ch, ok := v.(LChannel); ok { + return (chan LValue)(ch) + } + ls.TypeError(n, LTChannel) + return nil +} + +// If the given index is a LChannel, returns this channel. 
If this argument is absent or is nil, returns ch. Otherwise, raises an error. +func (ls *LState) OptChannel(n int, ch chan LValue) chan LValue { + v := ls.Get(n) + if v == LNil { + return ch + } + if ch, ok := v.(LChannel); ok { + return (chan LValue)(ch) + } + ls.TypeError(n, LTChannel) + return nil +} + +/* }}} */ + +// diff --git a/vendor/github.com/yuin/gopher-lua/baselib.go b/vendor/github.com/yuin/gopher-lua/baselib.go new file mode 100644 index 00000000000..08c2b82dd10 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/baselib.go @@ -0,0 +1,562 @@ +package lua + +import ( + "fmt" + "io" + "os" + "runtime" + "strconv" + "strings" +) + +/* basic functions {{{ */ + +func OpenBase(L *LState) int { + global := L.Get(GlobalsIndex).(*LTable) + L.SetGlobal("_G", global) + L.SetGlobal("_VERSION", LString(PackageName+" "+PackageVersion)) + basemod := L.RegisterModule("_G", baseFuncs) + global.RawSetString("ipairs", L.NewClosure(baseIpairs, L.NewFunction(ipairsaux))) + global.RawSetString("pairs", L.NewClosure(basePairs, L.NewFunction(pairsaux))) + L.Push(basemod) + return 1 +} + +var baseFuncs = map[string]LGFunction{ + "assert": baseAssert, + "collectgarbage": baseCollectGarbage, + "dofile": baseDoFile, + "error": baseError, + "getfenv": baseGetFEnv, + "getmetatable": baseGetMetatable, + "load": baseLoad, + "loadfile": baseLoadFile, + "loadstring": baseLoadString, + "next": baseNext, + "pcall": basePCall, + "print": basePrint, + "rawequal": baseRawEqual, + "rawget": baseRawGet, + "rawset": baseRawSet, + "select": baseSelect, + "_printregs": base_PrintRegs, + "setfenv": baseSetFEnv, + "setmetatable": baseSetMetatable, + "tonumber": baseToNumber, + "tostring": baseToString, + "type": baseType, + "unpack": baseUnpack, + "xpcall": baseXPCall, + // loadlib + "module": loModule, + "require": loRequire, +} + +func baseAssert(L *LState) int { + if !L.ToBool(1) { + L.RaiseError(L.OptString(2, "assertion failed!")) + return 0 + } + return L.GetTop() +} + +func 
baseCollectGarbage(L *LState) int { + runtime.GC() + return 0 +} + +func baseDoFile(L *LState) int { + src := L.ToString(1) + top := L.GetTop() + fn, err := L.LoadFile(src) + if err != nil { + L.Push(LString(err.Error())) + L.Panic(L) + } + L.Push(fn) + L.Call(0, MultRet) + return L.GetTop() - top +} + +func baseError(L *LState) int { + obj := L.CheckAny(1) + level := L.OptInt(2, 1) + L.Error(obj, level) + return 0 +} + +func baseGetFEnv(L *LState) int { + var value LValue + if L.GetTop() == 0 { + value = LNumber(1) + } else { + value = L.Get(1) + } + + if fn, ok := value.(*LFunction); ok { + if !fn.IsG { + L.Push(fn.Env) + } else { + L.Push(L.G.Global) + } + return 1 + } + + if number, ok := value.(LNumber); ok { + level := int(float64(number)) + if level <= 0 { + L.Push(L.Env) + } else { + cf := L.currentFrame + for i := 0; i < level && cf != nil; i++ { + cf = cf.Parent + } + if cf == nil || cf.Fn.IsG { + L.Push(L.G.Global) + } else { + L.Push(cf.Fn.Env) + } + } + return 1 + } + + L.Push(L.G.Global) + return 1 +} + +func baseGetMetatable(L *LState) int { + L.Push(L.GetMetatable(L.CheckAny(1))) + return 1 +} + +func ipairsaux(L *LState) int { + tb := L.CheckTable(1) + i := L.CheckInt(2) + i++ + v := tb.RawGetInt(i) + if v == LNil { + return 0 + } else { + L.Pop(1) + L.Push(LNumber(i)) + L.Push(LNumber(i)) + L.Push(v) + return 2 + } +} + +func baseIpairs(L *LState) int { + tb := L.CheckTable(1) + L.Push(L.Get(UpvalueIndex(1))) + L.Push(tb) + L.Push(LNumber(0)) + return 3 +} + +func loadaux(L *LState, reader io.Reader, chunkname string) int { + if fn, err := L.Load(reader, chunkname); err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } else { + L.Push(fn) + return 1 + } +} + +func baseLoad(L *LState) int { + fn := L.CheckFunction(1) + chunkname := L.OptString(2, "?") + top := L.GetTop() + buf := []string{} + for { + L.SetTop(top) + L.Push(fn) + L.Call(0, 1) + ret := L.reg.Pop() + if ret == LNil { + break + } else if LVCanConvToString(ret) { + 
str := ret.String() + if len(str) > 0 { + buf = append(buf, string(str)) + } else { + break + } + } else { + L.Push(LNil) + L.Push(LString("reader function must return a string")) + return 2 + } + } + return loadaux(L, strings.NewReader(strings.Join(buf, "")), chunkname) +} + +func baseLoadFile(L *LState) int { + var reader io.Reader + var chunkname string + var err error + if L.GetTop() < 1 { + reader = os.Stdin + chunkname = "" + } else { + chunkname = L.CheckString(1) + reader, err = os.Open(chunkname) + if err != nil { + L.Push(LNil) + L.Push(LString(fmt.Sprintf("can not open file: %v", chunkname))) + return 2 + } + defer reader.(*os.File).Close() + } + return loadaux(L, reader, chunkname) +} + +func baseLoadString(L *LState) int { + return loadaux(L, strings.NewReader(L.CheckString(1)), L.OptString(2, "")) +} + +func baseNext(L *LState) int { + tb := L.CheckTable(1) + index := LNil + if L.GetTop() >= 2 { + index = L.Get(2) + } + key, value := tb.Next(index) + if key == LNil { + L.Push(LNil) + return 1 + } + L.Push(key) + L.Push(value) + return 2 +} + +func pairsaux(L *LState) int { + tb := L.CheckTable(1) + key, value := tb.Next(L.Get(2)) + if key == LNil { + return 0 + } else { + L.Pop(1) + L.Push(key) + L.Push(key) + L.Push(value) + return 2 + } +} + +func basePairs(L *LState) int { + tb := L.CheckTable(1) + L.Push(L.Get(UpvalueIndex(1))) + L.Push(tb) + L.Push(LNil) + return 3 +} + +func basePCall(L *LState) int { + L.CheckFunction(1) + nargs := L.GetTop() - 1 + if err := L.PCall(nargs, MultRet, nil); err != nil { + L.Push(LFalse) + if aerr, ok := err.(*ApiError); ok { + L.Push(aerr.Object) + } else { + L.Push(LString(err.Error())) + } + return 2 + } else { + L.Insert(LTrue, 1) + return L.GetTop() + } +} + +func basePrint(L *LState) int { + top := L.GetTop() + for i := 1; i <= top; i++ { + fmt.Print(L.ToStringMeta(L.Get(i)).String()) + if i != top { + fmt.Print("\t") + } + } + fmt.Println("") + return 0 +} + +func base_PrintRegs(L *LState) int { + 
L.printReg() + return 0 +} + +func baseRawEqual(L *LState) int { + if L.CheckAny(1) == L.CheckAny(2) { + L.Push(LTrue) + } else { + L.Push(LFalse) + } + return 1 +} + +func baseRawGet(L *LState) int { + L.Push(L.RawGet(L.CheckTable(1), L.CheckAny(2))) + return 1 +} + +func baseRawSet(L *LState) int { + L.RawSet(L.CheckTable(1), L.CheckAny(2), L.CheckAny(3)) + return 0 +} + +func baseSelect(L *LState) int { + L.CheckTypes(1, LTNumber, LTString) + switch lv := L.Get(1).(type) { + case LNumber: + idx := int(lv) + num := L.reg.Top() - L.indexToReg(int(lv)) - 1 + if idx < 0 { + num++ + } + return num + case LString: + if string(lv) != "#" { + L.ArgError(1, "invalid string '"+string(lv)+"'") + } + L.Push(LNumber(L.GetTop() - 1)) + return 1 + } + return 0 +} + +func baseSetFEnv(L *LState) int { + var value LValue + if L.GetTop() == 0 { + value = LNumber(1) + } else { + value = L.Get(1) + } + env := L.CheckTable(2) + + if fn, ok := value.(*LFunction); ok { + if fn.IsG { + L.RaiseError("cannot change the environment of given object") + } else { + fn.Env = env + L.Push(fn) + return 1 + } + } + + if number, ok := value.(LNumber); ok { + level := int(float64(number)) + if level <= 0 { + L.Env = env + return 0 + } + + cf := L.currentFrame + for i := 0; i < level && cf != nil; i++ { + cf = cf.Parent + } + if cf == nil || cf.Fn.IsG { + L.RaiseError("cannot change the environment of given object") + } else { + cf.Fn.Env = env + L.Push(cf.Fn) + return 1 + } + } + + L.RaiseError("cannot change the environment of given object") + return 0 +} + +func baseSetMetatable(L *LState) int { + L.CheckTypes(2, LTNil, LTTable) + obj := L.Get(1) + if obj == LNil { + L.RaiseError("cannot set metatable to a nil object.") + } + mt := L.Get(2) + if m := L.metatable(obj, true); m != LNil { + if tb, ok := m.(*LTable); ok && tb.RawGetString("__metatable") != LNil { + L.RaiseError("cannot change a protected metatable") + } + } + L.SetMetatable(obj, mt) + L.SetTop(1) + return 1 +} + +func baseToNumber(L 
*LState) int { + base := L.OptInt(2, 10) + switch lv := L.CheckAny(1).(type) { + case LNumber: + L.Push(lv) + case LString: + str := strings.Trim(string(lv), " \n\t") + if strings.Index(str, ".") > -1 { + if v, err := strconv.ParseFloat(str, LNumberBit); err != nil { + L.Push(LNil) + } else { + L.Push(LNumber(v)) + } + } else { + if v, err := strconv.ParseInt(str, base, LNumberBit); err != nil { + L.Push(LNil) + } else { + L.Push(LNumber(v)) + } + } + default: + L.Push(LNil) + } + return 1 +} + +func baseToString(L *LState) int { + v1 := L.CheckAny(1) + L.Push(L.ToStringMeta(v1)) + return 1 +} + +func baseType(L *LState) int { + L.Push(LString(L.CheckAny(1).Type().String())) + return 1 +} + +func baseUnpack(L *LState) int { + tb := L.CheckTable(1) + start := L.OptInt(2, 1) + end := L.OptInt(3, tb.Len()) + for i := start; i <= end; i++ { + L.Push(tb.RawGetInt(i)) + } + ret := end - start + 1 + if ret < 0 { + return 0 + } + return ret +} + +func baseXPCall(L *LState) int { + fn := L.CheckFunction(1) + errfunc := L.CheckFunction(2) + + top := L.GetTop() + L.Push(fn) + if err := L.PCall(0, MultRet, errfunc); err != nil { + L.Push(LFalse) + if aerr, ok := err.(*ApiError); ok { + L.Push(aerr.Object) + } else { + L.Push(LString(err.Error())) + } + return 2 + } else { + L.Insert(LTrue, top+1) + return L.GetTop() - top + } +} + +/* }}} */ + +/* load lib {{{ */ + +func loModule(L *LState) int { + name := L.CheckString(1) + loaded := L.GetField(L.Get(RegistryIndex), "_LOADED") + tb := L.GetField(loaded, name) + if _, ok := tb.(*LTable); !ok { + tb = L.FindTable(L.Get(GlobalsIndex).(*LTable), name, 1) + if tb == LNil { + L.RaiseError("name conflict for module: %v", name) + } + L.SetField(loaded, name, tb) + } + if L.GetField(tb, "_NAME") == LNil { + L.SetField(tb, "_M", tb) + L.SetField(tb, "_NAME", LString(name)) + names := strings.Split(name, ".") + pname := "" + if len(names) > 1 { + pname = strings.Join(names[:len(names)-1], ".") + "." 
+ } + L.SetField(tb, "_PACKAGE", LString(pname)) + } + + caller := L.currentFrame.Parent + if caller == nil { + L.RaiseError("no calling stack.") + } else if caller.Fn.IsG { + L.RaiseError("module() can not be called from GFunctions.") + } + L.SetFEnv(caller.Fn, tb) + + top := L.GetTop() + for i := 2; i <= top; i++ { + L.Push(L.Get(i)) + L.Push(tb) + L.Call(1, 0) + } + L.Push(tb) + return 1 +} + +var loopdetection = &LUserData{} + +func loRequire(L *LState) int { + name := L.CheckString(1) + loaded := L.GetField(L.Get(RegistryIndex), "_LOADED") + lv := L.GetField(loaded, name) + if LVAsBool(lv) { + if lv == loopdetection { + L.RaiseError("loop or previous error loading module: %s", name) + } + L.Push(lv) + return 1 + } + loaders, ok := L.GetField(L.Get(RegistryIndex), "_LOADERS").(*LTable) + if !ok { + L.RaiseError("package.loaders must be a table") + } + messages := []string{} + var modasfunc LValue + for i := 1; ; i++ { + loader := L.RawGetInt(loaders, i) + if loader == LNil { + L.RaiseError("module %s not found:\n\t%s, ", name, strings.Join(messages, "\n\t")) + } + L.Push(loader) + L.Push(LString(name)) + L.Call(1, 1) + ret := L.reg.Pop() + switch retv := ret.(type) { + case *LFunction: + modasfunc = retv + goto loopbreak + case LString: + messages = append(messages, string(retv)) + } + } +loopbreak: + L.SetField(loaded, name, loopdetection) + L.Push(modasfunc) + L.Push(LString(name)) + L.Call(1, 1) + ret := L.reg.Pop() + modv := L.GetField(loaded, name) + if ret != LNil && modv == loopdetection { + L.SetField(loaded, name, ret) + L.Push(ret) + } else if modv == loopdetection { + L.SetField(loaded, name, LTrue) + L.Push(LTrue) + } else { + L.Push(modv) + } + return 1 +} + +/* }}} */ + +// diff --git a/vendor/github.com/yuin/gopher-lua/channellib.go b/vendor/github.com/yuin/gopher-lua/channellib.go new file mode 100644 index 00000000000..6df3e889a1e --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/channellib.go @@ -0,0 +1,152 @@ +package lua + +import ( + 
"reflect" +) + +func checkChannel(L *LState, idx int) reflect.Value { + ch := L.CheckChannel(idx) + return reflect.ValueOf(ch) +} + +func checkGoroutineSafe(L *LState, idx int) LValue { + v := L.CheckAny(2) + if !isGoroutineSafe(v) { + L.ArgError(2, "can not send a function, userdata, thread or table that has a metatable") + } + return v +} + +func OpenChannel(L *LState) int { + var mod LValue + //_, ok := L.G.builtinMts[int(LTChannel)] + // if !ok { + mod = L.RegisterModule(ChannelLibName, channelFuncs) + mt := L.SetFuncs(L.NewTable(), channelMethods) + mt.RawSetString("__index", mt) + L.G.builtinMts[int(LTChannel)] = mt + // } + L.Push(mod) + return 1 +} + +var channelFuncs = map[string]LGFunction{ + "make": channelMake, + "select": channelSelect, +} + +func channelMake(L *LState) int { + buffer := L.OptInt(1, 0) + L.Push(LChannel(make(chan LValue, buffer))) + return 1 +} + +func channelSelect(L *LState) int { + //TODO check case table size + cases := make([]reflect.SelectCase, L.GetTop()) + top := L.GetTop() + for i := 0; i < top; i++ { + cas := reflect.SelectCase{reflect.SelectSend, reflect.ValueOf(nil), reflect.ValueOf(nil)} + tbl := L.CheckTable(i + 1) + dir, ok1 := tbl.RawGetInt(1).(LString) + if !ok1 { + L.ArgError(i+1, "invalid select case") + } + switch string(dir) { + case "<-|": + ch, ok := tbl.RawGetInt(2).(LChannel) + if !ok { + L.ArgError(i+1, "invalid select case") + } + cas.Chan = reflect.ValueOf((chan LValue)(ch)) + v := tbl.RawGetInt(3) + if !isGoroutineSafe(v) { + L.ArgError(i+1, "can not send a function, userdata, thread or table that has a metatable") + } + cas.Send = reflect.ValueOf(v) + case "|<-": + ch, ok := tbl.RawGetInt(2).(LChannel) + if !ok { + L.ArgError(i+1, "invalid select case") + } + cas.Chan = reflect.ValueOf((chan LValue)(ch)) + cas.Dir = reflect.SelectRecv + case "default": + cas.Dir = reflect.SelectDefault + default: + L.ArgError(i+1, "invalid channel direction:"+string(dir)) + } + cases[i] = cas + } + + pos, recv, rok := 
reflect.Select(cases) + lv := LNil + if recv.Kind() != 0 { + lv, _ = recv.Interface().(LValue) + if lv == nil { + lv = LNil + } + } + tbl := L.Get(pos + 1).(*LTable) + last := tbl.RawGetInt(tbl.Len()) + if last.Type() == LTFunction { + L.Push(last) + switch cases[pos].Dir { + case reflect.SelectRecv: + if rok { + L.Push(LTrue) + } else { + L.Push(LFalse) + } + L.Push(lv) + L.Call(2, 0) + case reflect.SelectSend: + L.Push(tbl.RawGetInt(3)) + L.Call(1, 0) + case reflect.SelectDefault: + L.Call(0, 0) + } + } + L.Push(LNumber(pos + 1)) + L.Push(lv) + if rok { + L.Push(LTrue) + } else { + L.Push(LFalse) + } + return 3 +} + +var channelMethods = map[string]LGFunction{ + "receive": channelReceive, + "send": channelSend, + "close": channelClose, +} + +func channelReceive(L *LState) int { + rch := checkChannel(L, 1) + v, ok := rch.Recv() + if ok { + L.Push(LTrue) + L.Push(v.Interface().(LValue)) + } else { + L.Push(LFalse) + L.Push(LNil) + } + return 2 +} + +func channelSend(L *LState) int { + rch := checkChannel(L, 1) + v := checkGoroutineSafe(L, 2) + rch.Send(reflect.ValueOf(v)) + return 0 +} + +func channelClose(L *LState) int { + rch := checkChannel(L, 1) + rch.Close() + return 0 +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/compile.go b/vendor/github.com/yuin/gopher-lua/compile.go new file mode 100644 index 00000000000..e756ca062bf --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/compile.go @@ -0,0 +1,1655 @@ +package lua + +import ( + "fmt" + "github.com/yuin/gopher-lua/ast" + "math" + "reflect" +) + +/* internal constants & structs {{{ */ + +const maxRegisters = 200 + +type expContextType int + +const ( + ecGlobal expContextType = iota + ecUpvalue + ecLocal + ecTable + ecVararg + ecMethod + ecNone +) + +const regNotDefined = opMaxArgsA + 1 +const labelNoJump = 0 + +type expcontext struct { + ctype expContextType + reg int + // varargopt >= 0: wants varargopt+1 results, i.e a = func() + // varargopt = -1: ignore results i.e func() + // varargopt = -2: 
receive all results i.e a = {func()} + varargopt int +} + +type assigncontext struct { + ec *expcontext + keyrk int + valuerk int + keyks bool + needmove bool +} + +type lblabels struct { + t int + f int + e int + b bool +} + +type constLValueExpr struct { + ast.ExprBase + + Value LValue +} + +// }}} + +/* utilities {{{ */ +var _ecnone0 = &expcontext{ecNone, regNotDefined, 0} +var _ecnonem1 = &expcontext{ecNone, regNotDefined, -1} +var _ecnonem2 = &expcontext{ecNone, regNotDefined, -2} +var ecfuncdef = &expcontext{ecMethod, regNotDefined, 0} + +func ecupdate(ec *expcontext, ctype expContextType, reg, varargopt int) { + ec.ctype = ctype + ec.reg = reg + ec.varargopt = varargopt +} + +func ecnone(varargopt int) *expcontext { + switch varargopt { + case 0: + return _ecnone0 + case -1: + return _ecnonem1 + case -2: + return _ecnonem2 + } + return &expcontext{ecNone, regNotDefined, varargopt} +} + +func sline(pos ast.PositionHolder) int { + return pos.Line() +} + +func eline(pos ast.PositionHolder) int { + return pos.LastLine() +} + +func savereg(ec *expcontext, reg int) int { + if ec.ctype != ecLocal || ec.reg == regNotDefined { + return reg + } + return ec.reg +} + +func raiseCompileError(context *funcContext, line int, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) 
+ panic(&CompileError{context: context, Line: line, Message: msg}) +} + +func isVarArgReturnExpr(expr ast.Expr) bool { + switch ex := expr.(type) { + case *ast.FuncCallExpr: + return !ex.AdjustRet + case *ast.Comma3Expr: + return true + } + return false +} + +func lnumberValue(expr ast.Expr) (LNumber, bool) { + if ex, ok := expr.(*ast.NumberExpr); ok { + lv, err := parseNumber(ex.Value) + if err != nil { + lv = LNumber(math.NaN()) + } + return lv, true + } else if ex, ok := expr.(*constLValueExpr); ok { + return ex.Value.(LNumber), true + } + return 0, false +} + +/* utilities }}} */ + +type CompileError struct { // {{{ + context *funcContext + Line int + Message string +} + +func (e *CompileError) Error() string { + return fmt.Sprintf("compile error near line(%v) %v: %v", e.Line, e.context.Proto.SourceName, e.Message) +} // }}} + +type codeStore struct { // {{{ + codes []uint32 + lines []int + pc int +} + +func (cd *codeStore) Add(inst uint32, line int) { + if l := len(cd.codes); l <= 0 || cd.pc == l { + cd.codes = append(cd.codes, inst) + cd.lines = append(cd.lines, line) + } else { + cd.codes[cd.pc] = inst + cd.lines[cd.pc] = line + } + cd.pc++ +} + +func (cd *codeStore) AddABC(op int, a int, b int, c int, line int) { + cd.Add(opCreateABC(op, a, b, c), line) +} + +func (cd *codeStore) AddABx(op int, a int, bx int, line int) { + cd.Add(opCreateABx(op, a, bx), line) +} + +func (cd *codeStore) AddASbx(op int, a int, sbx int, line int) { + cd.Add(opCreateASbx(op, a, sbx), line) +} + +func (cd *codeStore) PropagateKMV(top int, save *int, reg *int, inc int) { + lastinst := cd.Last() + if opGetArgA(lastinst) >= top { + switch opGetOpCode(lastinst) { + case OP_LOADK: + cindex := opGetArgBx(lastinst) + if cindex <= opMaxIndexRk { + cd.Pop() + *save = opRkAsk(cindex) + return + } + case OP_MOVE: + cd.Pop() + *save = opGetArgB(lastinst) + return + } + } + *save = *reg + *reg = *reg + inc +} + +func (cd *codeStore) PropagateMV(top int, save *int, reg *int, inc int) { + 
lastinst := cd.Last() + if opGetArgA(lastinst) >= top { + switch opGetOpCode(lastinst) { + case OP_MOVE: + cd.Pop() + *save = opGetArgB(lastinst) + return + } + } + *save = *reg + *reg = *reg + inc +} + +func (cd *codeStore) SetOpCode(pc int, v int) { + opSetOpCode(&cd.codes[pc], v) +} + +func (cd *codeStore) SetA(pc int, v int) { + opSetArgA(&cd.codes[pc], v) +} + +func (cd *codeStore) SetB(pc int, v int) { + opSetArgB(&cd.codes[pc], v) +} + +func (cd *codeStore) SetC(pc int, v int) { + opSetArgC(&cd.codes[pc], v) +} + +func (cd *codeStore) SetBx(pc int, v int) { + opSetArgBx(&cd.codes[pc], v) +} + +func (cd *codeStore) SetSbx(pc int, v int) { + opSetArgSbx(&cd.codes[pc], v) +} + +func (cd *codeStore) At(pc int) uint32 { + return cd.codes[pc] +} + +func (cd *codeStore) List() []uint32 { + return cd.codes[:cd.pc] +} + +func (cd *codeStore) PosList() []int { + return cd.lines[:cd.pc] +} + +func (cd *codeStore) LastPC() int { + return cd.pc - 1 +} + +func (cd *codeStore) Last() uint32 { + if cd.pc == 0 { + return opInvalidInstruction + } + return cd.codes[cd.pc-1] +} + +func (cd *codeStore) Pop() { + cd.pc-- +} /* }}} Code */ + +/* {{{ VarNamePool */ + +type varNamePoolValue struct { + Index int + Name string +} + +type varNamePool struct { + names []string + offset int +} + +func newVarNamePool(offset int) *varNamePool { + return &varNamePool{make([]string, 0, 16), offset} +} + +func (vp *varNamePool) Names() []string { + return vp.names +} + +func (vp *varNamePool) List() []varNamePoolValue { + result := make([]varNamePoolValue, len(vp.names), len(vp.names)) + for i, name := range vp.names { + result[i].Index = i + vp.offset + result[i].Name = name + } + return result +} + +func (vp *varNamePool) LastIndex() int { + return vp.offset + len(vp.names) +} + +func (vp *varNamePool) Find(name string) int { + for i := len(vp.names) - 1; i >= 0; i-- { + if vp.names[i] == name { + return i + vp.offset + } + } + return -1 +} + +func (vp *varNamePool) RegisterUnique(name 
string) int { + index := vp.Find(name) + if index < 0 { + return vp.Register(name) + } + return index +} + +func (vp *varNamePool) Register(name string) int { + vp.names = append(vp.names, name) + return len(vp.names) - 1 + vp.offset +} + +/* }}} VarNamePool */ + +/* FuncContext {{{ */ + +type codeBlock struct { + LocalVars *varNamePool + BreakLabel int + Parent *codeBlock + RefUpvalue bool + LineStart int + LastLine int +} + +func newCodeBlock(localvars *varNamePool, blabel int, parent *codeBlock, pos ast.PositionHolder) *codeBlock { + bl := &codeBlock{localvars, blabel, parent, false, 0, 0} + if pos != nil { + bl.LineStart = pos.Line() + bl.LastLine = pos.LastLine() + } + return bl +} + +type funcContext struct { + Proto *FunctionProto + Code *codeStore + Parent *funcContext + Upvalues *varNamePool + Block *codeBlock + Blocks []*codeBlock + regTop int + labelId int + labelPc map[int]int +} + +func newFuncContext(sourcename string, parent *funcContext) *funcContext { + fc := &funcContext{ + Proto: newFunctionProto(sourcename), + Code: &codeStore{make([]uint32, 0, 1024), make([]int, 0, 1024), 0}, + Parent: parent, + Upvalues: newVarNamePool(0), + Block: newCodeBlock(newVarNamePool(0), labelNoJump, nil, nil), + regTop: 0, + labelId: 1, + labelPc: map[int]int{}, + } + fc.Blocks = []*codeBlock{fc.Block} + return fc +} + +func (fc *funcContext) NewLabel() int { + ret := fc.labelId + fc.labelId++ + return ret +} + +func (fc *funcContext) SetLabelPc(label int, pc int) { + fc.labelPc[label] = pc +} + +func (fc *funcContext) GetLabelPc(label int) int { + return fc.labelPc[label] +} + +func (fc *funcContext) ConstIndex(value LValue) int { + ctype := value.Type() + for i, lv := range fc.Proto.Constants { + if lv.Type() == ctype && lv == value { + return i + } + } + fc.Proto.Constants = append(fc.Proto.Constants, value) + v := len(fc.Proto.Constants) - 1 + if v > opMaxArgBx { + raiseCompileError(fc, fc.Proto.LineDefined, "too many constants") + } + return v +} + +func (fc 
*funcContext) RegisterLocalVar(name string) int { + ret := fc.Block.LocalVars.Register(name) + fc.Proto.DbgLocals = append(fc.Proto.DbgLocals, &DbgLocalInfo{Name: name, StartPc: fc.Code.LastPC() + 1}) + fc.SetRegTop(fc.RegTop() + 1) + return ret +} + +func (fc *funcContext) FindLocalVarAndBlock(name string) (int, *codeBlock) { + for block := fc.Block; block != nil; block = block.Parent { + if index := block.LocalVars.Find(name); index > -1 { + return index, block + } + } + return -1, nil +} + +func (fc *funcContext) FindLocalVar(name string) int { + idx, _ := fc.FindLocalVarAndBlock(name) + return idx +} + +func (fc *funcContext) LocalVars() []varNamePoolValue { + result := make([]varNamePoolValue, 0, 32) + for _, block := range fc.Blocks { + result = append(result, block.LocalVars.List()...) + } + return result +} + +func (fc *funcContext) EnterBlock(blabel int, pos ast.PositionHolder) { + fc.Block = newCodeBlock(newVarNamePool(fc.RegTop()), blabel, fc.Block, pos) + fc.Blocks = append(fc.Blocks, fc.Block) +} + +func (fc *funcContext) CloseUpvalues() int { + n := -1 + if fc.Block.RefUpvalue { + n = fc.Block.Parent.LocalVars.LastIndex() + fc.Code.AddABC(OP_CLOSE, n, 0, 0, fc.Block.LastLine) + } + return n +} + +func (fc *funcContext) LeaveBlock() int { + closed := fc.CloseUpvalues() + fc.EndScope() + fc.Block = fc.Block.Parent + fc.SetRegTop(fc.Block.LocalVars.LastIndex()) + return closed +} + +func (fc *funcContext) EndScope() { + for _, vr := range fc.Block.LocalVars.List() { + fc.Proto.DbgLocals[vr.Index].EndPc = fc.Code.LastPC() + } +} + +func (fc *funcContext) SetRegTop(top int) { + if top > maxRegisters { + raiseCompileError(fc, fc.Proto.LineDefined, "too many local variables") + } + fc.regTop = top +} + +func (fc *funcContext) RegTop() int { + return fc.regTop +} + +/* FuncContext }}} */ + +func compileChunk(context *funcContext, chunk []ast.Stmt) { // {{{ + for _, stmt := range chunk { + compileStmt(context, stmt) + } +} // }}} + +func compileBlock(context 
*funcContext, chunk []ast.Stmt) { // {{{ + if len(chunk) == 0 { + return + } + ph := &ast.Node{} + ph.SetLine(sline(chunk[0])) + ph.SetLastLine(eline(chunk[len(chunk)-1])) + context.EnterBlock(labelNoJump, ph) + for _, stmt := range chunk { + compileStmt(context, stmt) + } + context.LeaveBlock() +} // }}} + +func compileStmt(context *funcContext, stmt ast.Stmt) { // {{{ + switch st := stmt.(type) { + case *ast.AssignStmt: + compileAssignStmt(context, st) + case *ast.LocalAssignStmt: + compileLocalAssignStmt(context, st) + case *ast.FuncCallStmt: + compileFuncCallExpr(context, context.RegTop(), st.Expr.(*ast.FuncCallExpr), ecnone(-1)) + case *ast.DoBlockStmt: + context.EnterBlock(labelNoJump, st) + compileChunk(context, st.Stmts) + context.LeaveBlock() + case *ast.WhileStmt: + compileWhileStmt(context, st) + case *ast.RepeatStmt: + compileRepeatStmt(context, st) + case *ast.FuncDefStmt: + compileFuncDefStmt(context, st) + case *ast.ReturnStmt: + compileReturnStmt(context, st) + case *ast.IfStmt: + compileIfStmt(context, st) + case *ast.BreakStmt: + compileBreakStmt(context, st) + case *ast.NumberForStmt: + compileNumberForStmt(context, st) + case *ast.GenericForStmt: + compileGenericForStmt(context, st) + } +} // }}} + +func compileAssignStmtLeft(context *funcContext, stmt *ast.AssignStmt) (int, []*assigncontext) { // {{{ + reg := context.RegTop() + acs := make([]*assigncontext, 0, len(stmt.Lhs)) + for i, lhs := range stmt.Lhs { + islast := i == len(stmt.Lhs)-1 + switch st := lhs.(type) { + case *ast.IdentExpr: + identtype := getIdentRefType(context, context, st) + ec := &expcontext{identtype, regNotDefined, 0} + switch identtype { + case ecGlobal: + context.ConstIndex(LString(st.Value)) + case ecUpvalue: + context.Upvalues.RegisterUnique(st.Value) + case ecLocal: + if islast { + ec.reg = context.FindLocalVar(st.Value) + } + } + acs = append(acs, &assigncontext{ec, 0, 0, false, false}) + case *ast.AttrGetExpr: + ac := &assigncontext{&expcontext{ecTable, 
regNotDefined, 0}, 0, 0, false, false} + compileExprWithKMVPropagation(context, st.Object, ®, &ac.ec.reg) + compileExprWithKMVPropagation(context, st.Key, ®, &ac.keyrk) + if _, ok := st.Key.(*ast.StringExpr); ok { + ac.keyks = true + } + acs = append(acs, ac) + + default: + panic("invalid left expression.") + } + } + return reg, acs +} // }}} + +func compileAssignStmtRight(context *funcContext, stmt *ast.AssignStmt, reg int, acs []*assigncontext) (int, []*assigncontext) { // {{{ + lennames := len(stmt.Lhs) + lenexprs := len(stmt.Rhs) + namesassigned := 0 + + for namesassigned < lennames { + ac := acs[namesassigned] + ec := ac.ec + var expr ast.Expr = nil + if namesassigned >= lenexprs { + expr = &ast.NilExpr{} + expr.SetLine(sline(stmt.Lhs[namesassigned])) + expr.SetLastLine(eline(stmt.Lhs[namesassigned])) + } else if isVarArgReturnExpr(stmt.Rhs[namesassigned]) && (lenexprs-namesassigned-1) <= 0 { + varargopt := lennames - namesassigned - 1 + regstart := reg + reginc := compileExpr(context, reg, stmt.Rhs[namesassigned], ecnone(varargopt)) + reg += reginc + for i := namesassigned; i < namesassigned+int(reginc); i++ { + acs[i].needmove = true + if acs[i].ec.ctype == ecTable { + acs[i].valuerk = regstart + (i - namesassigned) + } + } + namesassigned = lennames + continue + } + + if expr == nil { + expr = stmt.Rhs[namesassigned] + } + idx := reg + reginc := compileExpr(context, reg, expr, ec) + if ec.ctype == ecTable { + if _, ok := expr.(*ast.LogicalOpExpr); !ok { + context.Code.PropagateKMV(context.RegTop(), &ac.valuerk, ®, reginc) + } else { + ac.valuerk = idx + reg += reginc + } + } else { + ac.needmove = reginc != 0 + reg += reginc + } + namesassigned += 1 + } + + rightreg := reg - 1 + + // extra right exprs + for i := namesassigned; i < lenexprs; i++ { + varargopt := -1 + if i != lenexprs-1 { + varargopt = 0 + } + reg += compileExpr(context, reg, stmt.Rhs[i], ecnone(varargopt)) + } + return rightreg, acs +} // }}} + +func compileAssignStmt(context *funcContext, 
stmt *ast.AssignStmt) { // {{{ + code := context.Code + lennames := len(stmt.Lhs) + reg, acs := compileAssignStmtLeft(context, stmt) + reg, acs = compileAssignStmtRight(context, stmt, reg, acs) + + for i := lennames - 1; i >= 0; i-- { + ex := stmt.Lhs[i] + switch acs[i].ec.ctype { + case ecLocal: + if acs[i].needmove { + code.AddABC(OP_MOVE, context.FindLocalVar(ex.(*ast.IdentExpr).Value), reg, 0, sline(ex)) + reg -= 1 + } + case ecGlobal: + code.AddABx(OP_SETGLOBAL, reg, context.ConstIndex(LString(ex.(*ast.IdentExpr).Value)), sline(ex)) + reg -= 1 + case ecUpvalue: + code.AddABC(OP_SETUPVAL, reg, context.Upvalues.RegisterUnique(ex.(*ast.IdentExpr).Value), 0, sline(ex)) + reg -= 1 + case ecTable: + opcode := OP_SETTABLE + if acs[i].keyks { + opcode = OP_SETTABLEKS + } + code.AddABC(opcode, acs[i].ec.reg, acs[i].keyrk, acs[i].valuerk, sline(ex)) + if !opIsK(acs[i].valuerk) { + reg -= 1 + } + } + } +} // }}} + +func compileRegAssignment(context *funcContext, names []string, exprs []ast.Expr, reg int, nvars int, line int) { // {{{ + lennames := len(names) + lenexprs := len(exprs) + namesassigned := 0 + ec := &expcontext{} + + for namesassigned < lennames && namesassigned < lenexprs { + if isVarArgReturnExpr(exprs[namesassigned]) && (lenexprs-namesassigned-1) <= 0 { + + varargopt := nvars - namesassigned + ecupdate(ec, ecVararg, reg, varargopt-1) + compileExpr(context, reg, exprs[namesassigned], ec) + reg += varargopt + namesassigned = lennames + } else { + ecupdate(ec, ecLocal, reg, 0) + compileExpr(context, reg, exprs[namesassigned], ec) + reg += 1 + namesassigned += 1 + } + } + + // extra left names + if lennames > namesassigned { + restleft := lennames - namesassigned - 1 + context.Code.AddABC(OP_LOADNIL, reg, reg+restleft, 0, line) + reg += restleft + } + + // extra right exprs + for i := namesassigned; i < lenexprs; i++ { + varargopt := -1 + if i != lenexprs-1 { + varargopt = 0 + } + ecupdate(ec, ecNone, reg, varargopt) + reg += compileExpr(context, reg, 
exprs[i], ec) + } +} // }}} + +func compileLocalAssignStmt(context *funcContext, stmt *ast.LocalAssignStmt) { // {{{ + reg := context.RegTop() + if len(stmt.Names) == 1 && len(stmt.Exprs) == 1 { + if _, ok := stmt.Exprs[0].(*ast.FunctionExpr); ok { + context.RegisterLocalVar(stmt.Names[0]) + compileRegAssignment(context, stmt.Names, stmt.Exprs, reg, len(stmt.Names), sline(stmt)) + return + } + } + + compileRegAssignment(context, stmt.Names, stmt.Exprs, reg, len(stmt.Names), sline(stmt)) + for _, name := range stmt.Names { + context.RegisterLocalVar(name) + } +} // }}} + +func compileReturnStmt(context *funcContext, stmt *ast.ReturnStmt) { // {{{ + lenexprs := len(stmt.Exprs) + code := context.Code + reg := context.RegTop() + a := reg + lastisvaarg := false + + if lenexprs == 1 { + switch ex := stmt.Exprs[0].(type) { + case *ast.IdentExpr: + if idx := context.FindLocalVar(ex.Value); idx > -1 { + code.AddABC(OP_RETURN, idx, 2, 0, sline(stmt)) + return + } + case *ast.FuncCallExpr: + reg += compileExpr(context, reg, ex, ecnone(-2)) + code.SetOpCode(code.LastPC(), OP_TAILCALL) + code.AddABC(OP_RETURN, a, 0, 0, sline(stmt)) + return + } + } + + for i, expr := range stmt.Exprs { + if i == lenexprs-1 && isVarArgReturnExpr(expr) { + compileExpr(context, reg, expr, ecnone(-2)) + lastisvaarg = true + } else { + reg += compileExpr(context, reg, expr, ecnone(0)) + } + } + count := reg - a + 1 + if lastisvaarg { + count = 0 + } + context.Code.AddABC(OP_RETURN, a, count, 0, sline(stmt)) +} // }}} + +func compileIfStmt(context *funcContext, stmt *ast.IfStmt) { // {{{ + thenlabel := context.NewLabel() + elselabel := context.NewLabel() + endlabel := context.NewLabel() + + compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false) + context.SetLabelPc(thenlabel, context.Code.LastPC()) + compileBlock(context, stmt.Then) + if len(stmt.Else) > 0 { + context.Code.AddASbx(OP_JMP, 0, endlabel, sline(stmt)) + } + context.SetLabelPc(elselabel, 
context.Code.LastPC()) + if len(stmt.Else) > 0 { + compileBlock(context, stmt.Else) + context.SetLabelPc(endlabel, context.Code.LastPC()) + } + +} // }}} + +func compileBranchCondition(context *funcContext, reg int, expr ast.Expr, thenlabel, elselabel int, hasnextcond bool) { // {{{ + // TODO folding constants? + code := context.Code + flip := 0 + jumplabel := elselabel + if hasnextcond { + flip = 1 + jumplabel = thenlabel + } + + switch ex := expr.(type) { + case *ast.FalseExpr, *ast.NilExpr: + if !hasnextcond { + code.AddASbx(OP_JMP, 0, elselabel, sline(expr)) + return + } + case *ast.TrueExpr, *ast.NumberExpr, *ast.StringExpr: + if !hasnextcond { + return + } + case *ast.UnaryNotOpExpr: + compileBranchCondition(context, reg, ex.Expr, elselabel, thenlabel, !hasnextcond) + return + case *ast.LogicalOpExpr: + switch ex.Operator { + case "and": + nextcondlabel := context.NewLabel() + compileBranchCondition(context, reg, ex.Lhs, nextcondlabel, elselabel, false) + context.SetLabelPc(nextcondlabel, context.Code.LastPC()) + compileBranchCondition(context, reg, ex.Rhs, thenlabel, elselabel, hasnextcond) + case "or": + nextcondlabel := context.NewLabel() + compileBranchCondition(context, reg, ex.Lhs, thenlabel, nextcondlabel, true) + context.SetLabelPc(nextcondlabel, context.Code.LastPC()) + compileBranchCondition(context, reg, ex.Rhs, thenlabel, elselabel, hasnextcond) + } + return + case *ast.RelationalOpExpr: + compileRelationalOpExprAux(context, reg, ex, flip, jumplabel) + return + } + + a := reg + compileExprWithMVPropagation(context, expr, ®, &a) + code.AddABC(OP_TEST, a, 0, 0^flip, sline(expr)) + code.AddASbx(OP_JMP, 0, jumplabel, sline(expr)) +} // }}} + +func compileWhileStmt(context *funcContext, stmt *ast.WhileStmt) { // {{{ + thenlabel := context.NewLabel() + elselabel := context.NewLabel() + condlabel := context.NewLabel() + + context.SetLabelPc(condlabel, context.Code.LastPC()) + compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, 
elselabel, false) + context.SetLabelPc(thenlabel, context.Code.LastPC()) + context.EnterBlock(elselabel, stmt) + compileChunk(context, stmt.Stmts) + context.CloseUpvalues() + context.Code.AddASbx(OP_JMP, 0, condlabel, eline(stmt)) + context.LeaveBlock() + context.SetLabelPc(elselabel, context.Code.LastPC()) +} // }}} + +func compileRepeatStmt(context *funcContext, stmt *ast.RepeatStmt) { // {{{ + initlabel := context.NewLabel() + thenlabel := context.NewLabel() + elselabel := context.NewLabel() + + context.SetLabelPc(initlabel, context.Code.LastPC()) + context.SetLabelPc(elselabel, context.Code.LastPC()) + context.EnterBlock(thenlabel, stmt) + compileChunk(context, stmt.Stmts) + compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false) + + context.SetLabelPc(thenlabel, context.Code.LastPC()) + n := context.LeaveBlock() + + if n > -1 { + label := context.NewLabel() + context.Code.AddASbx(OP_JMP, 0, label, eline(stmt)) + context.SetLabelPc(elselabel, context.Code.LastPC()) + context.Code.AddABC(OP_CLOSE, n, 0, 0, eline(stmt)) + context.Code.AddASbx(OP_JMP, 0, initlabel, eline(stmt)) + context.SetLabelPc(label, context.Code.LastPC()) + } + +} // }}} + +func compileBreakStmt(context *funcContext, stmt *ast.BreakStmt) { // {{{ + for block := context.Block; block != nil; block = block.Parent { + if label := block.BreakLabel; label != labelNoJump { + if block.RefUpvalue { + context.Code.AddABC(OP_CLOSE, block.Parent.LocalVars.LastIndex(), 0, 0, sline(stmt)) + } + context.Code.AddASbx(OP_JMP, 0, label, sline(stmt)) + return + } + } + raiseCompileError(context, sline(stmt), "no loop to break") +} // }}} + +func compileFuncDefStmt(context *funcContext, stmt *ast.FuncDefStmt) { // {{{ + if stmt.Name.Func == nil { + reg := context.RegTop() + var treg, kreg int + compileExprWithKMVPropagation(context, stmt.Name.Receiver, ®, &treg) + kreg = loadRk(context, ®, stmt.Func, LString(stmt.Name.Method)) + compileExpr(context, reg, stmt.Func, 
ecfuncdef) + context.Code.AddABC(OP_SETTABLE, treg, kreg, reg, sline(stmt.Name.Receiver)) + } else { + astmt := &ast.AssignStmt{Lhs: []ast.Expr{stmt.Name.Func}, Rhs: []ast.Expr{stmt.Func}} + astmt.SetLine(sline(stmt.Func)) + astmt.SetLastLine(eline(stmt.Func)) + compileAssignStmt(context, astmt) + } +} // }}} + +func compileNumberForStmt(context *funcContext, stmt *ast.NumberForStmt) { // {{{ + code := context.Code + endlabel := context.NewLabel() + ec := &expcontext{} + + context.EnterBlock(endlabel, stmt) + reg := context.RegTop() + rindex := context.RegisterLocalVar("(for index)") + ecupdate(ec, ecLocal, rindex, 0) + compileExpr(context, reg, stmt.Init, ec) + + reg = context.RegTop() + rlimit := context.RegisterLocalVar("(for limit)") + ecupdate(ec, ecLocal, rlimit, 0) + compileExpr(context, reg, stmt.Limit, ec) + + reg = context.RegTop() + rstep := context.RegisterLocalVar("(for step)") + if stmt.Step == nil { + stmt.Step = &ast.NumberExpr{Value: "1"} + stmt.Step.SetLine(sline(stmt.Init)) + } + ecupdate(ec, ecLocal, rstep, 0) + compileExpr(context, reg, stmt.Step, ec) + + code.AddASbx(OP_FORPREP, rindex, 0, sline(stmt)) + + context.RegisterLocalVar(stmt.Name) + + bodypc := code.LastPC() + compileChunk(context, stmt.Stmts) + + context.LeaveBlock() + + flpc := code.LastPC() + code.AddASbx(OP_FORLOOP, rindex, bodypc-(flpc+1), sline(stmt)) + + context.SetLabelPc(endlabel, code.LastPC()) + code.SetSbx(bodypc, flpc-bodypc) + +} // }}} + +func compileGenericForStmt(context *funcContext, stmt *ast.GenericForStmt) { // {{{ + code := context.Code + endlabel := context.NewLabel() + bodylabel := context.NewLabel() + fllabel := context.NewLabel() + nnames := len(stmt.Names) + + context.EnterBlock(endlabel, stmt) + rgen := context.RegisterLocalVar("(for generator)") + context.RegisterLocalVar("(for state)") + context.RegisterLocalVar("(for control)") + + compileRegAssignment(context, stmt.Names, stmt.Exprs, context.RegTop()-3, 3, sline(stmt)) + + code.AddASbx(OP_JMP, 0, 
fllabel, sline(stmt)) + + for _, name := range stmt.Names { + context.RegisterLocalVar(name) + } + + context.SetLabelPc(bodylabel, code.LastPC()) + compileChunk(context, stmt.Stmts) + + context.LeaveBlock() + + context.SetLabelPc(fllabel, code.LastPC()) + code.AddABC(OP_TFORLOOP, rgen, 0, nnames, sline(stmt)) + code.AddASbx(OP_JMP, 0, bodylabel, sline(stmt)) + + context.SetLabelPc(endlabel, code.LastPC()) +} // }}} + +func compileExpr(context *funcContext, reg int, expr ast.Expr, ec *expcontext) int { // {{{ + code := context.Code + sreg := savereg(ec, reg) + sused := 1 + if sreg < reg { + sused = 0 + } + + switch ex := expr.(type) { + case *ast.StringExpr: + code.AddABx(OP_LOADK, sreg, context.ConstIndex(LString(ex.Value)), sline(ex)) + return sused + case *ast.NumberExpr: + num, err := parseNumber(ex.Value) + if err != nil { + num = LNumber(math.NaN()) + } + code.AddABx(OP_LOADK, sreg, context.ConstIndex(num), sline(ex)) + return sused + case *constLValueExpr: + code.AddABx(OP_LOADK, sreg, context.ConstIndex(ex.Value), sline(ex)) + return sused + case *ast.NilExpr: + code.AddABC(OP_LOADNIL, sreg, sreg, 0, sline(ex)) + return sused + case *ast.FalseExpr: + code.AddABC(OP_LOADBOOL, sreg, 0, 0, sline(ex)) + return sused + case *ast.TrueExpr: + code.AddABC(OP_LOADBOOL, sreg, 1, 0, sline(ex)) + return sused + case *ast.IdentExpr: + switch getIdentRefType(context, context, ex) { + case ecGlobal: + code.AddABx(OP_GETGLOBAL, sreg, context.ConstIndex(LString(ex.Value)), sline(ex)) + case ecUpvalue: + code.AddABC(OP_GETUPVAL, sreg, context.Upvalues.RegisterUnique(ex.Value), 0, sline(ex)) + case ecLocal: + b := context.FindLocalVar(ex.Value) + code.AddABC(OP_MOVE, sreg, b, 0, sline(ex)) + } + return sused + case *ast.Comma3Expr: + if context.Proto.IsVarArg == 0 { + raiseCompileError(context, sline(ex), "cannot use '...' 
outside a vararg function") + } + context.Proto.IsVarArg &= ^VarArgNeedsArg + code.AddABC(OP_VARARG, sreg, 2+ec.varargopt, 0, sline(ex)) + if context.RegTop() > (sreg+2+ec.varargopt) || ec.varargopt < -1 { + return 0 + } + return (sreg + 1 + ec.varargopt) - reg + case *ast.AttrGetExpr: + a := sreg + b := reg + compileExprWithMVPropagation(context, ex.Object, ®, &b) + c := reg + compileExprWithKMVPropagation(context, ex.Key, ®, &c) + opcode := OP_GETTABLE + if _, ok := ex.Key.(*ast.StringExpr); ok { + opcode = OP_GETTABLEKS + } + code.AddABC(opcode, a, b, c, sline(ex)) + return sused + case *ast.TableExpr: + compileTableExpr(context, reg, ex, ec) + return 1 + case *ast.ArithmeticOpExpr: + compileArithmeticOpExpr(context, reg, ex, ec) + return sused + case *ast.StringConcatOpExpr: + compileStringConcatOpExpr(context, reg, ex, ec) + return sused + case *ast.UnaryMinusOpExpr, *ast.UnaryNotOpExpr, *ast.UnaryLenOpExpr: + compileUnaryOpExpr(context, reg, ex, ec) + return sused + case *ast.RelationalOpExpr: + compileRelationalOpExpr(context, reg, ex, ec) + return sused + case *ast.LogicalOpExpr: + compileLogicalOpExpr(context, reg, ex, ec) + return sused + case *ast.FuncCallExpr: + return compileFuncCallExpr(context, reg, ex, ec) + case *ast.FunctionExpr: + childcontext := newFuncContext(context.Proto.SourceName, context) + compileFunctionExpr(childcontext, ex, ec) + protono := len(context.Proto.FunctionPrototypes) + context.Proto.FunctionPrototypes = append(context.Proto.FunctionPrototypes, childcontext.Proto) + code.AddABx(OP_CLOSURE, sreg, protono, sline(ex)) + for _, upvalue := range childcontext.Upvalues.List() { + localidx, block := context.FindLocalVarAndBlock(upvalue.Name) + if localidx > -1 { + code.AddABC(OP_MOVE, 0, localidx, 0, sline(ex)) + block.RefUpvalue = true + } else { + upvalueidx := context.Upvalues.Find(upvalue.Name) + if upvalueidx < 0 { + upvalueidx = context.Upvalues.RegisterUnique(upvalue.Name) + } + code.AddABC(OP_GETUPVAL, 0, upvalueidx, 0, 
sline(ex)) + } + } + return sused + default: + panic(fmt.Sprintf("expr %v not implemented.", reflect.TypeOf(ex).Elem().Name())) + } + + panic("should not reach here") + return sused +} // }}} + +func compileExprWithPropagation(context *funcContext, expr ast.Expr, reg *int, save *int, propergator func(int, *int, *int, int)) { // {{{ + reginc := compileExpr(context, *reg, expr, ecnone(0)) + if _, ok := expr.(*ast.LogicalOpExpr); ok { + *save = *reg + *reg = *reg + reginc + } else { + propergator(context.RegTop(), save, reg, reginc) + } +} // }}} + +func compileExprWithKMVPropagation(context *funcContext, expr ast.Expr, reg *int, save *int) { // {{{ + compileExprWithPropagation(context, expr, reg, save, context.Code.PropagateKMV) +} // }}} + +func compileExprWithMVPropagation(context *funcContext, expr ast.Expr, reg *int, save *int) { // {{{ + compileExprWithPropagation(context, expr, reg, save, context.Code.PropagateMV) +} // }}} + +func constFold(exp ast.Expr) ast.Expr { // {{{ + switch expr := exp.(type) { + case *ast.ArithmeticOpExpr: + lvalue, lisconst := lnumberValue(expr.Lhs) + rvalue, risconst := lnumberValue(expr.Rhs) + if lisconst && risconst { + switch expr.Operator { + case "+": + return &constLValueExpr{Value: lvalue + rvalue} + case "-": + return &constLValueExpr{Value: lvalue - rvalue} + case "*": + return &constLValueExpr{Value: lvalue * rvalue} + case "/": + return &constLValueExpr{Value: lvalue / rvalue} + case "%": + return &constLValueExpr{Value: luaModulo(lvalue, rvalue)} + case "^": + return &constLValueExpr{Value: LNumber(math.Pow(float64(lvalue), float64(rvalue)))} + default: + panic(fmt.Sprintf("unknown binop: %v", expr.Operator)) + } + } else { + retexpr := *expr + retexpr.Lhs = constFold(expr.Lhs) + retexpr.Rhs = constFold(expr.Rhs) + return &retexpr + } + case *ast.UnaryMinusOpExpr: + expr.Expr = constFold(expr.Expr) + if value, ok := lnumberValue(expr.Expr); ok { + return &constLValueExpr{Value: LNumber(-value)} + } + return expr + 
default: + + return exp + } + return exp +} // }}} + +func compileFunctionExpr(context *funcContext, funcexpr *ast.FunctionExpr, ec *expcontext) { // {{{ + context.Proto.LineDefined = sline(funcexpr) + context.Proto.LastLineDefined = eline(funcexpr) + if len(funcexpr.ParList.Names) > maxRegisters { + raiseCompileError(context, context.Proto.LineDefined, "register overflow") + } + context.Proto.NumParameters = uint8(len(funcexpr.ParList.Names)) + if ec.ctype == ecMethod { + context.Proto.NumParameters += 1 + context.RegisterLocalVar("self") + } + for _, name := range funcexpr.ParList.Names { + context.RegisterLocalVar(name) + } + if funcexpr.ParList.HasVargs { + if CompatVarArg { + context.Proto.IsVarArg = VarArgHasArg | VarArgNeedsArg + if context.Parent != nil { + context.RegisterLocalVar("arg") + } + } + context.Proto.IsVarArg |= VarArgIsVarArg + } + + compileChunk(context, funcexpr.Stmts) + + context.Code.AddABC(OP_RETURN, 0, 1, 0, eline(funcexpr)) + context.EndScope() + context.Proto.Code = context.Code.List() + context.Proto.DbgSourcePositions = context.Code.PosList() + context.Proto.DbgUpvalues = context.Upvalues.Names() + context.Proto.NumUpvalues = uint8(len(context.Proto.DbgUpvalues)) + for _, clv := range context.Proto.Constants { + sv := "" + if slv, ok := clv.(LString); ok { + sv = string(slv) + } + context.Proto.stringConstants = append(context.Proto.stringConstants, sv) + } + patchCode(context) +} // }}} + +func compileTableExpr(context *funcContext, reg int, ex *ast.TableExpr, ec *expcontext) { // {{{ + code := context.Code + /* + tablereg := savereg(ec, reg) + if tablereg == reg { + reg += 1 + } + */ + tablereg := reg + reg++ + code.AddABC(OP_NEWTABLE, tablereg, 0, 0, sline(ex)) + tablepc := code.LastPC() + regbase := reg + + arraycount := 0 + lastvararg := false + for i, field := range ex.Fields { + islast := i == len(ex.Fields)-1 + if field.Key == nil { + if islast && isVarArgReturnExpr(field.Value) { + reg += compileExpr(context, reg, 
field.Value, ecnone(-2)) + lastvararg = true + } else { + reg += compileExpr(context, reg, field.Value, ecnone(0)) + arraycount += 1 + } + } else { + regorg := reg + b := reg + compileExprWithKMVPropagation(context, field.Key, ®, &b) + c := reg + compileExprWithKMVPropagation(context, field.Value, ®, &c) + opcode := OP_SETTABLE + if _, ok := field.Key.(*ast.StringExpr); ok { + opcode = OP_SETTABLEKS + } + code.AddABC(opcode, tablereg, b, c, sline(ex)) + reg = regorg + } + flush := arraycount % FieldsPerFlush + if (arraycount != 0 && (flush == 0 || islast)) || lastvararg { + reg = regbase + num := flush + if num == 0 { + num = FieldsPerFlush + } + c := (arraycount-1)/FieldsPerFlush + 1 + b := num + if islast && isVarArgReturnExpr(field.Value) { + b = 0 + } + line := field.Value + if field.Key != nil { + line = field.Key + } + if c > 511 { + c = 0 + } + code.AddABC(OP_SETLIST, tablereg, b, c, sline(line)) + if c == 0 { + code.Add(uint32(c), sline(line)) + } + } + } + code.SetB(tablepc, int2Fb(arraycount)) + code.SetC(tablepc, int2Fb(len(ex.Fields)-arraycount)) + if ec.ctype == ecLocal && ec.reg != tablereg { + code.AddABC(OP_MOVE, ec.reg, tablereg, 0, sline(ex)) + } +} // }}} + +func compileArithmeticOpExpr(context *funcContext, reg int, expr *ast.ArithmeticOpExpr, ec *expcontext) { // {{{ + exp := constFold(expr) + if ex, ok := exp.(*constLValueExpr); ok { + exp.SetLine(sline(expr)) + compileExpr(context, reg, ex, ec) + return + } + expr, _ = exp.(*ast.ArithmeticOpExpr) + a := savereg(ec, reg) + b := reg + compileExprWithKMVPropagation(context, expr.Lhs, ®, &b) + c := reg + compileExprWithKMVPropagation(context, expr.Rhs, ®, &c) + + op := 0 + switch expr.Operator { + case "+": + op = OP_ADD + case "-": + op = OP_SUB + case "*": + op = OP_MUL + case "/": + op = OP_DIV + case "%": + op = OP_MOD + case "^": + op = OP_POW + } + context.Code.AddABC(op, a, b, c, sline(expr)) +} // }}} + +func compileStringConcatOpExpr(context *funcContext, reg int, expr 
*ast.StringConcatOpExpr, ec *expcontext) { // {{{ + code := context.Code + crange := 1 + for current := expr.Rhs; current != nil; { + if ex, ok := current.(*ast.StringConcatOpExpr); ok { + crange += 1 + current = ex.Rhs + } else { + current = nil + } + } + a := savereg(ec, reg) + basereg := reg + reg += compileExpr(context, reg, expr.Lhs, ecnone(0)) + reg += compileExpr(context, reg, expr.Rhs, ecnone(0)) + for pc := code.LastPC(); pc != 0 && opGetOpCode(code.At(pc)) == OP_CONCAT; pc-- { + code.Pop() + } + code.AddABC(OP_CONCAT, a, basereg, basereg+crange, sline(expr)) +} // }}} + +func compileUnaryOpExpr(context *funcContext, reg int, expr ast.Expr, ec *expcontext) { // {{{ + opcode := 0 + code := context.Code + var operandexpr ast.Expr + switch ex := expr.(type) { + case *ast.UnaryMinusOpExpr: + exp := constFold(ex) + if lvexpr, ok := exp.(*constLValueExpr); ok { + exp.SetLine(sline(expr)) + compileExpr(context, reg, lvexpr, ec) + return + } + ex, _ = exp.(*ast.UnaryMinusOpExpr) + operandexpr = ex.Expr + opcode = OP_UNM + case *ast.UnaryNotOpExpr: + switch ex.Expr.(type) { + case *ast.TrueExpr: + code.AddABC(OP_LOADBOOL, savereg(ec, reg), 0, 0, sline(expr)) + return + case *ast.FalseExpr, *ast.NilExpr: + code.AddABC(OP_LOADBOOL, savereg(ec, reg), 1, 0, sline(expr)) + return + default: + opcode = OP_NOT + operandexpr = ex.Expr + } + case *ast.UnaryLenOpExpr: + opcode = OP_LEN + operandexpr = ex.Expr + } + + a := savereg(ec, reg) + b := reg + compileExprWithMVPropagation(context, operandexpr, ®, &b) + code.AddABC(opcode, a, b, 0, sline(expr)) +} // }}} + +func compileRelationalOpExprAux(context *funcContext, reg int, expr *ast.RelationalOpExpr, flip int, label int) { // {{{ + code := context.Code + b := reg + compileExprWithKMVPropagation(context, expr.Lhs, ®, &b) + c := reg + compileExprWithKMVPropagation(context, expr.Rhs, ®, &c) + switch expr.Operator { + case "<": + code.AddABC(OP_LT, 0^flip, b, c, sline(expr)) + case ">": + code.AddABC(OP_LT, 0^flip, c, b, 
sline(expr)) + case "<=": + code.AddABC(OP_LE, 0^flip, b, c, sline(expr)) + case ">=": + code.AddABC(OP_LE, 0^flip, c, b, sline(expr)) + case "==": + code.AddABC(OP_EQ, 0^flip, b, c, sline(expr)) + case "~=": + code.AddABC(OP_EQ, 1^flip, b, c, sline(expr)) + } + code.AddASbx(OP_JMP, 0, label, sline(expr)) +} // }}} + +func compileRelationalOpExpr(context *funcContext, reg int, expr *ast.RelationalOpExpr, ec *expcontext) { // {{{ + a := savereg(ec, reg) + code := context.Code + jumplabel := context.NewLabel() + compileRelationalOpExprAux(context, reg, expr, 1, jumplabel) + code.AddABC(OP_LOADBOOL, a, 0, 1, sline(expr)) + context.SetLabelPc(jumplabel, code.LastPC()) + code.AddABC(OP_LOADBOOL, a, 1, 0, sline(expr)) +} // }}} + +func compileLogicalOpExpr(context *funcContext, reg int, expr *ast.LogicalOpExpr, ec *expcontext) { // {{{ + a := savereg(ec, reg) + code := context.Code + endlabel := context.NewLabel() + lb := &lblabels{context.NewLabel(), context.NewLabel(), endlabel, false} + nextcondlabel := context.NewLabel() + if expr.Operator == "and" { + compileLogicalOpExprAux(context, reg, expr.Lhs, ec, nextcondlabel, endlabel, false, lb) + context.SetLabelPc(nextcondlabel, code.LastPC()) + compileLogicalOpExprAux(context, reg, expr.Rhs, ec, endlabel, endlabel, false, lb) + } else { + compileLogicalOpExprAux(context, reg, expr.Lhs, ec, endlabel, nextcondlabel, true, lb) + context.SetLabelPc(nextcondlabel, code.LastPC()) + compileLogicalOpExprAux(context, reg, expr.Rhs, ec, endlabel, endlabel, false, lb) + } + + if lb.b { + context.SetLabelPc(lb.f, code.LastPC()) + code.AddABC(OP_LOADBOOL, a, 0, 1, sline(expr)) + context.SetLabelPc(lb.t, code.LastPC()) + code.AddABC(OP_LOADBOOL, a, 1, 0, sline(expr)) + } + + lastinst := code.Last() + if opGetOpCode(lastinst) == OP_JMP && opGetArgSbx(lastinst) == endlabel { + code.Pop() + } + + context.SetLabelPc(endlabel, code.LastPC()) +} // }}} + +func compileLogicalOpExprAux(context *funcContext, reg int, expr ast.Expr, ec 
*expcontext, thenlabel, elselabel int, hasnextcond bool, lb *lblabels) { // {{{ + // TODO folding constants? + code := context.Code + flip := 0 + jumplabel := elselabel + if hasnextcond { + flip = 1 + jumplabel = thenlabel + } + + switch ex := expr.(type) { + case *ast.FalseExpr: + if elselabel == lb.e { + code.AddASbx(OP_JMP, 0, lb.f, sline(expr)) + lb.b = true + } else { + code.AddASbx(OP_JMP, 0, elselabel, sline(expr)) + } + return + case *ast.NilExpr: + if elselabel == lb.e { + compileExpr(context, reg, expr, ec) + code.AddASbx(OP_JMP, 0, lb.e, sline(expr)) + } else { + code.AddASbx(OP_JMP, 0, elselabel, sline(expr)) + } + return + case *ast.TrueExpr: + if thenlabel == lb.e { + code.AddASbx(OP_JMP, 0, lb.t, sline(expr)) + lb.b = true + } else { + code.AddASbx(OP_JMP, 0, thenlabel, sline(expr)) + } + return + case *ast.NumberExpr, *ast.StringExpr: + if thenlabel == lb.e { + compileExpr(context, reg, expr, ec) + code.AddASbx(OP_JMP, 0, lb.e, sline(expr)) + } else { + code.AddASbx(OP_JMP, 0, thenlabel, sline(expr)) + } + return + case *ast.LogicalOpExpr: + switch ex.Operator { + case "and": + nextcondlabel := context.NewLabel() + compileLogicalOpExprAux(context, reg, ex.Lhs, ec, nextcondlabel, elselabel, false, lb) + context.SetLabelPc(nextcondlabel, context.Code.LastPC()) + compileLogicalOpExprAux(context, reg, ex.Rhs, ec, thenlabel, elselabel, hasnextcond, lb) + case "or": + nextcondlabel := context.NewLabel() + compileLogicalOpExprAux(context, reg, ex.Lhs, ec, thenlabel, nextcondlabel, true, lb) + context.SetLabelPc(nextcondlabel, context.Code.LastPC()) + compileLogicalOpExprAux(context, reg, ex.Rhs, ec, thenlabel, elselabel, hasnextcond, lb) + } + return + case *ast.RelationalOpExpr: + if thenlabel == elselabel { + flip ^= 1 + jumplabel = lb.t + lb.b = true + } else if thenlabel == lb.e { + jumplabel = lb.t + lb.b = true + } else if elselabel == lb.e { + jumplabel = lb.f + lb.b = true + } + compileRelationalOpExprAux(context, reg, ex, flip, jumplabel) + return 
+ } + + if !hasnextcond && thenlabel == elselabel { + reg += compileExpr(context, reg, expr, ec) + } else { + a := reg + sreg := savereg(ec, a) + reg += compileExpr(context, reg, expr, ecnone(0)) + if sreg == a { + code.AddABC(OP_TEST, a, 0, 0^flip, sline(expr)) + } else { + code.AddABC(OP_TESTSET, sreg, a, 0^flip, sline(expr)) + } + } + code.AddASbx(OP_JMP, 0, jumplabel, sline(expr)) +} // }}} + +func compileFuncCallExpr(context *funcContext, reg int, expr *ast.FuncCallExpr, ec *expcontext) int { // {{{ + funcreg := reg + if ec.ctype == ecLocal && ec.reg == (int(context.Proto.NumParameters)-1) { + funcreg = ec.reg + reg = ec.reg + } + argc := len(expr.Args) + islastvararg := false + name := "(anonymous)" + + if expr.Func != nil { // hoge.func() + reg += compileExpr(context, reg, expr.Func, ecnone(0)) + name = getExprName(context, expr.Func) + } else { // hoge:method() + b := reg + compileExprWithMVPropagation(context, expr.Receiver, ®, &b) + c := loadRk(context, ®, expr, LString(expr.Method)) + context.Code.AddABC(OP_SELF, funcreg, b, c, sline(expr)) + // increments a register for an implicit "self" + reg = b + 1 + reg2 := funcreg + 2 + if reg2 > reg { + reg = reg2 + } + argc += 1 + name = string(expr.Method) + } + + for i, ar := range expr.Args { + islastvararg = (i == len(expr.Args)-1) && isVarArgReturnExpr(ar) + if islastvararg { + compileExpr(context, reg, ar, ecnone(-2)) + } else { + reg += compileExpr(context, reg, ar, ecnone(0)) + } + } + b := argc + 1 + if islastvararg { + b = 0 + } + context.Code.AddABC(OP_CALL, funcreg, b, ec.varargopt+2, sline(expr)) + context.Proto.DbgCalls = append(context.Proto.DbgCalls, DbgCall{Pc: context.Code.LastPC(), Name: name}) + + if ec.varargopt == 0 && ec.ctype == ecLocal && funcreg != ec.reg { + context.Code.AddABC(OP_MOVE, ec.reg, funcreg, 0, sline(expr)) + return 1 + } + if context.RegTop() > (funcreg+2+ec.varargopt) || ec.varargopt < -1 { + return 0 + } + return ec.varargopt + 1 +} // }}} + +func loadRk(context 
*funcContext, reg *int, expr ast.Expr, cnst LValue) int { // {{{ + cindex := context.ConstIndex(cnst) + if cindex <= opMaxIndexRk { + return opRkAsk(cindex) + } else { + ret := *reg + *reg++ + context.Code.AddABx(OP_LOADK, ret, cindex, sline(expr)) + return ret + } +} // }}} + +func getIdentRefType(context *funcContext, current *funcContext, expr *ast.IdentExpr) expContextType { // {{{ + if current == nil { + return ecGlobal + } else if current.FindLocalVar(expr.Value) > -1 { + if current == context { + return ecLocal + } + return ecUpvalue + } + return getIdentRefType(context, current.Parent, expr) +} // }}} + +func getExprName(context *funcContext, expr ast.Expr) string { // {{{ + switch ex := expr.(type) { + case *ast.IdentExpr: + return ex.Value + case *ast.AttrGetExpr: + switch kex := ex.Key.(type) { + case *ast.StringExpr: + return kex.Value + } + return "?" + } + return "?" +} // }}} + +func patchCode(context *funcContext) { // {{{ + maxreg := 1 + if np := int(context.Proto.NumParameters); np > 1 { + maxreg = np + } + moven := 0 + code := context.Code.List() + for pc := 0; pc < len(code); pc++ { + inst := code[pc] + curop := opGetOpCode(inst) + switch curop { + case OP_CLOSURE: + pc += int(context.Proto.FunctionPrototypes[opGetArgBx(inst)].NumUpvalues) + moven = 0 + continue + case OP_SETGLOBAL, OP_SETUPVAL, OP_EQ, OP_LT, OP_LE, OP_TEST, + OP_TAILCALL, OP_RETURN, OP_FORPREP, OP_FORLOOP, OP_TFORLOOP, + OP_SETLIST, OP_CLOSE: + /* nothing to do */ + case OP_CALL: + if reg := opGetArgA(inst) + opGetArgC(inst) - 2; reg > maxreg { + maxreg = reg + } + case OP_VARARG: + if reg := opGetArgA(inst) + opGetArgB(inst) - 1; reg > maxreg { + maxreg = reg + } + case OP_SELF: + if reg := opGetArgA(inst) + 1; reg > maxreg { + maxreg = reg + } + case OP_LOADNIL: + if reg := opGetArgB(inst); reg > maxreg { + maxreg = reg + } + case OP_JMP: // jump to jump optimization + distance := 0 + count := 0 // avoiding infinite loops + for jmp := inst; opGetOpCode(jmp) == OP_JMP && count 
< 5; jmp = context.Code.At(pc + distance + 1) { + d := context.GetLabelPc(opGetArgSbx(jmp)) - pc + if d > opMaxArgSbx { + if distance == 0 { + raiseCompileError(context, context.Proto.LineDefined, "too long to jump.") + } + break + } + distance = d + count++ + } + if distance == 0 { + context.Code.SetOpCode(pc, OP_NOP) + } else { + context.Code.SetSbx(pc, distance) + } + default: + if reg := opGetArgA(inst); reg > maxreg { + maxreg = reg + } + } + + // bulk move optimization(reducing op dipatch costs) + if curop == OP_MOVE { + moven++ + } else { + if moven > 1 { + context.Code.SetOpCode(pc-moven, OP_MOVEN) + context.Code.SetC(pc-moven, intMin(moven-1, opMaxArgsC)) + } + moven = 0 + } + } + maxreg++ + if maxreg > maxRegisters { + raiseCompileError(context, context.Proto.LineDefined, "register overflow(too many local variables)") + } + context.Proto.NumUsedRegisters = uint8(maxreg) +} // }}} + +func Compile(chunk []ast.Stmt, name string) (proto *FunctionProto, err error) { // {{{ + defer func() { + if rcv := recover(); rcv != nil { + if _, ok := rcv.(*CompileError); ok { + err = rcv.(error) + } else { + panic(rcv) + } + } + }() + err = nil + parlist := &ast.ParList{HasVargs: true, Names: []string{}} + funcexpr := &ast.FunctionExpr{ParList: parlist, Stmts: chunk} + context := newFuncContext(name, nil) + compileFunctionExpr(context, funcexpr, ecnone(0)) + proto = context.Proto + return +} // }}} diff --git a/vendor/github.com/yuin/gopher-lua/config.go b/vendor/github.com/yuin/gopher-lua/config.go new file mode 100644 index 00000000000..4eda27c3cc3 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/config.go @@ -0,0 +1,34 @@ +package lua + +import ( + "os" +) + +var CompatVarArg = true +var FieldsPerFlush = 50 +var RegistrySize = 256 * 20 +var CallStackSize = 256 +var MaxTableGetLoop = 100 +var MaxArrayIndex = 67108864 + +type LNumber float64 + +const LNumberBit = 64 +const LNumberScanFormat = "%f" + +var LuaPath = "LUA_PATH" +var LuaLDir string +var LuaPathDefault 
string +var LuaOS string + +func init() { + if os.PathSeparator == '/' { // unix-like + LuaOS = "unix" + LuaLDir = "/usr/local/share/lua/5.1" + LuaPathDefault = "./?.lua;" + LuaLDir + "/?.lua;" + LuaLDir + "/?/init.lua" + } else { // windows + LuaOS = "windows" + LuaLDir = "!\\lua" + LuaPathDefault = ".\\?.lua;" + LuaLDir + "\\?.lua;" + LuaLDir + "\\?\\init.lua" + } +} diff --git a/vendor/github.com/yuin/gopher-lua/coroutinelib.go b/vendor/github.com/yuin/gopher-lua/coroutinelib.go new file mode 100644 index 00000000000..d42c41a1dfc --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/coroutinelib.go @@ -0,0 +1,112 @@ +package lua + +func OpenCoroutine(L *LState) int { + // TODO: Tie module name to contents of linit.go? + mod := L.RegisterModule(CoroutineLibName, coFuncs) + L.Push(mod) + return 1 +} + +var coFuncs = map[string]LGFunction{ + "create": coCreate, + "yield": coYield, + "resume": coResume, + "running": coRunning, + "status": coStatus, + "wrap": coWrap, +} + +func coCreate(L *LState) int { + fn := L.CheckFunction(1) + newthread, _ := L.NewThread() + base := 0 + newthread.stack.Push(callFrame{ + Fn: fn, + Pc: 0, + Base: base, + LocalBase: base + 1, + ReturnBase: base, + NArgs: 0, + NRet: MultRet, + Parent: nil, + TailCall: 0, + }) + L.Push(newthread) + return 1 +} + +func coYield(L *LState) int { + return -1 +} + +func coResume(L *LState) int { + th := L.CheckThread(1) + if L.G.CurrentThread == th { + msg := "can not resume a running thread" + if th.wrapped { + L.RaiseError(msg) + return 0 + } + L.Push(LFalse) + L.Push(LString(msg)) + return 2 + } + if th.Dead { + msg := "can not resume a dead thread" + if th.wrapped { + L.RaiseError(msg) + return 0 + } + L.Push(LFalse) + L.Push(LString(msg)) + return 2 + } + th.Parent = L + L.G.CurrentThread = th + if !th.isStarted() { + cf := th.stack.Last() + th.currentFrame = cf + th.SetTop(0) + nargs := L.GetTop() - 1 + L.XMoveTo(th, nargs) + cf.NArgs = nargs + th.initCallFrame(cf) + th.Panic = panicWithoutTraceback 
+ } else { + nargs := L.GetTop() - 1 + L.XMoveTo(th, nargs) + } + top := L.GetTop() + threadRun(th) + return L.GetTop() - top +} + +func coRunning(L *LState) int { + if L.G.MainThread == L { + L.Push(LNil) + return 1 + } + L.Push(L.G.CurrentThread) + return 1 +} + +func coStatus(L *LState) int { + L.Push(LString(L.Status(L.CheckThread(1)))) + return 1 +} + +func wrapaux(L *LState) int { + L.Insert(L.ToThread(UpvalueIndex(1)), 1) + return coResume(L) +} + +func coWrap(L *LState) int { + coCreate(L) + L.CheckThread(L.GetTop()).wrapped = true + v := L.Get(L.GetTop()) + L.Pop(1) + L.Push(L.NewClosure(wrapaux, v)) + return 1 +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/debuglib.go b/vendor/github.com/yuin/gopher-lua/debuglib.go new file mode 100644 index 00000000000..c1f613f372e --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/debuglib.go @@ -0,0 +1,162 @@ +package lua + +import ( + "fmt" + "strings" +) + +func OpenDebug(L *LState) int { + dbgmod := L.RegisterModule(DebugLibName, debugFuncs) + L.Push(dbgmod) + return 1 +} + +var debugFuncs = map[string]LGFunction{ + "getfenv": debugGetFEnv, + "getinfo": debugGetInfo, + "getlocal": debugGetLocal, + "getmetatable": debugGetMetatable, + "getupvalue": debugGetUpvalue, + "setfenv": debugSetFEnv, + "setlocal": debugSetLocal, + "setmetatable": debugSetMetatable, + "setupvalue": debugSetUpvalue, + "traceback": debugTraceback, +} + +func debugGetFEnv(L *LState) int { + L.Push(L.GetFEnv(L.CheckAny(1))) + return 1 +} + +func debugGetInfo(L *LState) int { + L.CheckTypes(1, LTFunction, LTNumber) + arg1 := L.Get(1) + what := L.OptString(2, "Slunf") + var dbg *Debug + var fn LValue + var err error + var ok bool + switch lv := arg1.(type) { + case *LFunction: + dbg = &Debug{} + fn, err = L.GetInfo(">"+what, dbg, lv) + case LNumber: + dbg, ok = L.GetStack(int(lv)) + if !ok { + L.Push(LNil) + return 1 + } + fn, err = L.GetInfo(what, dbg, LNil) + } + + if err != nil { + L.Push(LNil) + return 1 + } + tbl := L.NewTable() + 
if len(dbg.Name) > 0 { + tbl.RawSetString("name", LString(dbg.Name)) + } else { + tbl.RawSetString("name", LNil) + } + tbl.RawSetString("what", LString(dbg.What)) + tbl.RawSetString("source", LString(dbg.Source)) + tbl.RawSetString("currentline", LNumber(dbg.CurrentLine)) + tbl.RawSetString("nups", LNumber(dbg.NUpvalues)) + tbl.RawSetString("linedefined", LNumber(dbg.LineDefined)) + tbl.RawSetString("lastlinedefined", LNumber(dbg.LastLineDefined)) + tbl.RawSetString("func", fn) + L.Push(tbl) + return 1 +} + +func debugGetLocal(L *LState) int { + level := L.CheckInt(1) + idx := L.CheckInt(2) + dbg, ok := L.GetStack(level) + if !ok { + L.ArgError(1, "level out of range") + } + name, value := L.GetLocal(dbg, idx) + if len(name) > 0 { + L.Push(LString(name)) + L.Push(value) + return 2 + } + L.Push(LNil) + return 1 +} + +func debugGetMetatable(L *LState) int { + L.Push(L.GetMetatable(L.CheckAny(1))) + return 1 +} + +func debugGetUpvalue(L *LState) int { + fn := L.CheckFunction(1) + idx := L.CheckInt(2) + name, value := L.GetUpvalue(fn, idx) + if len(name) > 0 { + L.Push(LString(name)) + L.Push(value) + return 2 + } + L.Push(LNil) + return 1 +} + +func debugSetFEnv(L *LState) int { + L.SetFEnv(L.CheckAny(1), L.CheckAny(2)) + return 0 +} + +func debugSetLocal(L *LState) int { + level := L.CheckInt(1) + idx := L.CheckInt(2) + value := L.CheckAny(3) + dbg, ok := L.GetStack(level) + if !ok { + L.ArgError(1, "level out of range") + } + name := L.SetLocal(dbg, idx, value) + if len(name) > 0 { + L.Push(LString(name)) + } else { + L.Push(LNil) + } + return 1 +} + +func debugSetMetatable(L *LState) int { + L.CheckTypes(2, LTNil, LTTable) + obj := L.Get(1) + mt := L.Get(2) + L.SetMetatable(obj, mt) + L.SetTop(1) + return 1 +} + +func debugSetUpvalue(L *LState) int { + fn := L.CheckFunction(1) + idx := L.CheckInt(2) + value := L.CheckAny(3) + name := L.SetUpvalue(fn, idx, value) + if len(name) > 0 { + L.Push(LString(name)) + } else { + L.Push(LNil) + } + return 1 +} + +func 
debugTraceback(L *LState) int { + msg := L.OptString(1, "") + level := L.OptInt(2, 1) + traceback := strings.TrimSpace(L.stackTrace(level)) + if len(msg) > 0 { + traceback = fmt.Sprintf("%s\n%s", msg, traceback) + } + L.Push(LString(traceback)) + return 1 +} diff --git a/vendor/github.com/yuin/gopher-lua/function.go b/vendor/github.com/yuin/gopher-lua/function.go new file mode 100644 index 00000000000..169e5407cac --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/function.go @@ -0,0 +1,193 @@ +package lua + +import ( + "fmt" + "strings" +) + +const ( + VarArgHasArg uint8 = 1 + VarArgIsVarArg uint8 = 2 + VarArgNeedsArg uint8 = 4 +) + +type DbgLocalInfo struct { + Name string + StartPc int + EndPc int +} + +type DbgCall struct { + Name string + Pc int +} + +type FunctionProto struct { + SourceName string + LineDefined int + LastLineDefined int + NumUpvalues uint8 + NumParameters uint8 + IsVarArg uint8 + NumUsedRegisters uint8 + Code []uint32 + Constants []LValue + FunctionPrototypes []*FunctionProto + + DbgSourcePositions []int + DbgLocals []*DbgLocalInfo + DbgCalls []DbgCall + DbgUpvalues []string + + stringConstants []string +} + +/* Upvalue {{{ */ + +type Upvalue struct { + next *Upvalue + reg *registry + index int + value LValue + closed bool +} + +func (uv *Upvalue) Value() LValue { + //if uv.IsClosed() { + if uv.closed || uv.reg == nil { + return uv.value + } + //return uv.reg.Get(uv.index) + return uv.reg.array[uv.index] +} + +func (uv *Upvalue) SetValue(value LValue) { + if uv.IsClosed() { + uv.value = value + } else { + uv.reg.Set(uv.index, value) + } +} + +func (uv *Upvalue) Close() { + value := uv.Value() + uv.closed = true + uv.value = value +} + +func (uv *Upvalue) IsClosed() bool { + return uv.closed || uv.reg == nil +} + +func UpvalueIndex(i int) int { + return GlobalsIndex - i +} + +/* }}} */ + +/* FunctionProto {{{ */ + +func newFunctionProto(name string) *FunctionProto { + return &FunctionProto{ + SourceName: name, + LineDefined: 0, + 
LastLineDefined: 0, + NumUpvalues: 0, + NumParameters: 0, + IsVarArg: 0, + NumUsedRegisters: 2, + Code: make([]uint32, 0, 128), + Constants: make([]LValue, 0, 32), + FunctionPrototypes: make([]*FunctionProto, 0, 16), + + DbgSourcePositions: make([]int, 0, 128), + DbgLocals: make([]*DbgLocalInfo, 0, 16), + DbgCalls: make([]DbgCall, 0, 128), + DbgUpvalues: make([]string, 0, 16), + + stringConstants: make([]string, 0, 32), + } +} + +func (fp *FunctionProto) String() string { + return fp.str(1, 0) +} + +func (fp *FunctionProto) str(level int, count int) string { + indent := strings.Repeat(" ", level-1) + buf := []string{} + buf = append(buf, fmt.Sprintf("%v; function [%v] definition (level %v)\n", + indent, count, level)) + buf = append(buf, fmt.Sprintf("%v; %v upvalues, %v params, %v stacks\n", + indent, fp.NumUpvalues, fp.NumParameters, fp.NumUsedRegisters)) + for reg, linfo := range fp.DbgLocals { + buf = append(buf, fmt.Sprintf("%v.local %v ; %v\n", indent, linfo.Name, reg)) + } + for reg, upvalue := range fp.DbgUpvalues { + buf = append(buf, fmt.Sprintf("%v.upvalue %v ; %v\n", indent, upvalue, reg)) + } + for reg, conzt := range fp.Constants { + buf = append(buf, fmt.Sprintf("%v.const %v ; %v\n", indent, conzt.String(), reg)) + } + buf = append(buf, "\n") + + protono := 0 + for no, code := range fp.Code { + inst := opGetOpCode(code) + if inst == OP_CLOSURE { + buf = append(buf, "\n") + buf = append(buf, fp.FunctionPrototypes[protono].str(level+1, protono)) + buf = append(buf, "\n") + protono++ + } + buf = append(buf, fmt.Sprintf("%v[%03d] %v (line:%v)\n", + indent, no+1, opToString(code), fp.DbgSourcePositions[no])) + + } + buf = append(buf, fmt.Sprintf("%v; end of function\n", indent)) + return strings.Join(buf, "") +} + +/* }}} */ + +/* LFunction {{{ */ + +func newLFunctionL(proto *FunctionProto, env *LTable, nupvalue int) *LFunction { + return &LFunction{ + IsG: false, + Env: env, + + Proto: proto, + GFunction: nil, + Upvalues: make([]*Upvalue, nupvalue), + } 
+} + +func newLFunctionG(gfunc LGFunction, env *LTable, nupvalue int) *LFunction { + return &LFunction{ + IsG: true, + Env: env, + + Proto: nil, + GFunction: gfunc, + Upvalues: make([]*Upvalue, nupvalue), + } +} + +func (fn *LFunction) LocalName(regno, pc int) (string, bool) { + if fn.IsG { + return "", false + } + p := fn.Proto + for i := 0; i < len(p.DbgLocals) && p.DbgLocals[i].StartPc < pc; i++ { + if pc < p.DbgLocals[i].EndPc { + regno-- + if regno == 0 { + return p.DbgLocals[i].Name, true + } + } + } + return "", false +} + +/* }}} */ diff --git a/vendor/github.com/yuin/gopher-lua/iolib.go b/vendor/github.com/yuin/gopher-lua/iolib.go new file mode 100644 index 00000000000..65404f12901 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/iolib.go @@ -0,0 +1,743 @@ +package lua + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "syscall" +) + +var ioFuncs = map[string]LGFunction{ + "close": ioClose, + "flush": ioFlush, + "lines": ioLines, + "input": ioInput, + "output": ioOutput, + "open": ioOpenFile, + "popen": ioPopen, + "read": ioRead, + "type": ioType, + "tmpfile": ioTmpFile, + "write": ioWrite, +} + +const lFileClass = "FILE*" + +type lFile struct { + fp *os.File + pp *exec.Cmd + writer io.Writer + reader *bufio.Reader + closed bool +} + +type lFileType int + +const ( + lFileFile lFileType = iota + lFileProcess +) + +const fileDefOutIndex = 1 +const fileDefInIndex = 2 +const fileDefaultWriteBuffer = 4096 +const fileDefaultReadBuffer = 4096 + +func checkFile(L *LState) *lFile { + ud := L.CheckUserData(1) + if file, ok := ud.Value.(*lFile); ok { + return file + } + L.ArgError(1, "file expected") + return nil +} + +func errorIfFileIsClosed(L *LState, file *lFile) { + if file.closed { + L.ArgError(1, "file is closed") + } +} + +func newFile(L *LState, file *os.File, path string, flag int, perm os.FileMode, writable, readable bool) (*LUserData, error) { + ud := L.NewUserData() + var err error + if file == nil { + file, err = 
os.OpenFile(path, flag, perm) + if err != nil { + return nil, err + } + } + lfile := &lFile{fp: file, pp: nil, writer: nil, reader: nil, closed: false} + ud.Value = lfile + if writable { + lfile.writer = file + } + if readable { + lfile.reader = bufio.NewReaderSize(file, fileDefaultReadBuffer) + } + L.SetMetatable(ud, L.GetTypeMetatable(lFileClass)) + return ud, nil +} + +func newProcess(L *LState, cmd string, writable, readable bool) (*LUserData, error) { + ud := L.NewUserData() + c, args := popenArgs(cmd) + pp := exec.Command(c, args...) + lfile := &lFile{fp: nil, pp: pp, writer: nil, reader: nil, closed: false} + ud.Value = lfile + + var err error + if writable { + lfile.writer, err = pp.StdinPipe() + } + if readable { + var reader io.Reader + reader, err = pp.StdoutPipe() + lfile.reader = bufio.NewReaderSize(reader, fileDefaultReadBuffer) + } + if err != nil { + return nil, err + } + err = pp.Start() + if err != nil { + return nil, err + } + + L.SetMetatable(ud, L.GetTypeMetatable(lFileClass)) + return ud, nil +} + +func (file *lFile) Type() lFileType { + if file.fp == nil { + return lFileProcess + } + return lFileFile +} + +func (file *lFile) Name() string { + switch file.Type() { + case lFileFile: + return fmt.Sprintf("file %s", file.fp.Name()) + case lFileProcess: + return fmt.Sprintf("process %s", file.pp.Path) + } + return "" +} + +func (file *lFile) AbandonReadBuffer() error { + if file.Type() == lFileFile && file.reader != nil { + _, err := file.fp.Seek(-int64(file.reader.Buffered()), 1) + if err != nil { + return err + } + file.reader = bufio.NewReaderSize(file.fp, fileDefaultReadBuffer) + } + return nil +} + +func fileDefOut(L *LState) *LUserData { + return L.Get(UpvalueIndex(1)).(*LTable).RawGetInt(fileDefOutIndex).(*LUserData) +} + +func fileDefIn(L *LState) *LUserData { + return L.Get(UpvalueIndex(1)).(*LTable).RawGetInt(fileDefInIndex).(*LUserData) +} + +func fileIsWritable(L *LState, file *lFile) int { + if file.writer == nil { + L.Push(LNil) + 
L.Push(LString(fmt.Sprintf("%s is opened for only reading.", file.Name()))) + L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack + return 3 + } + return 0 +} + +func fileIsReadable(L *LState, file *lFile) int { + if file.reader == nil { + L.Push(LNil) + L.Push(LString(fmt.Sprintf("%s is opened for only writing.", file.Name()))) + L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack + return 3 + } + return 0 +} + +var stdFiles = []struct { + name string + file *os.File + writable bool + readable bool +}{ + {"stdout", os.Stdout, true, false}, + {"stdin", os.Stdin, false, true}, + {"stderr", os.Stderr, true, false}, +} + +func OpenIo(L *LState) int { + mod := L.RegisterModule(IoLibName, map[string]LGFunction{}).(*LTable) + mt := L.NewTypeMetatable(lFileClass) + mt.RawSetString("__index", mt) + L.SetFuncs(mt, fileMethods) + mt.RawSetString("lines", L.NewClosure(fileLines, L.NewFunction(fileLinesIter))) + + for _, finfo := range stdFiles { + file, _ := newFile(L, finfo.file, "", 0, os.FileMode(0), finfo.writable, finfo.readable) + mod.RawSetString(finfo.name, file) + } + uv := L.CreateTable(2, 0) + uv.RawSetInt(fileDefOutIndex, mod.RawGetString("stdout")) + uv.RawSetInt(fileDefInIndex, mod.RawGetString("stdin")) + for name, fn := range ioFuncs { + mod.RawSetString(name, L.NewClosure(fn, uv)) + } + mod.RawSetString("lines", L.NewClosure(ioLines, uv, L.NewClosure(ioLinesIter, uv))) + // Modifications are being made in-place rather than returned? 
+ L.Push(mod) + return 1 +} + +var fileMethods = map[string]LGFunction{ + "__tostring": fileToString, + "write": fileWrite, + "close": fileClose, + "flush": fileFlush, + "lines": fileLines, + "read": fileRead, + "seek": fileSeek, + "setvbuf": fileSetVBuf, +} + +func fileToString(L *LState) int { + file := checkFile(L) + if file.Type() == lFileFile { + if file.closed { + L.Push(LString("file (closed)")) + } else { + L.Push(LString("file")) + } + } else { + if file.closed { + L.Push(LString("process (closed)")) + } else { + L.Push(LString("process")) + } + } + return 1 +} + +func fileWriteAux(L *LState, file *lFile, idx int) int { + if n := fileIsWritable(L, file); n != 0 { + return n + } + errorIfFileIsClosed(L, file) + top := L.GetTop() + out := file.writer + var err error + for i := idx; i <= top; i++ { + L.CheckTypes(i, LTNumber, LTString) + s := LVAsString(L.Get(i)) + if _, err = out.Write(unsafeFastStringToReadOnlyBytes(s)); err != nil { + goto errreturn + } + } + + file.AbandonReadBuffer() + L.Push(LTrue) + return 1 +errreturn: + + file.AbandonReadBuffer() + L.Push(LNil) + L.Push(LString(err.Error())) + L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack + return 3 +} + +func fileCloseAux(L *LState, file *lFile) int { + file.closed = true + var err error + if file.writer != nil { + if bwriter, ok := file.writer.(*bufio.Writer); ok { + if err = bwriter.Flush(); err != nil { + goto errreturn + } + } + } + file.AbandonReadBuffer() + + switch file.Type() { + case lFileFile: + if err = file.fp.Close(); err != nil { + goto errreturn + } + L.Push(LTrue) + return 1 + case lFileProcess: + err = file.pp.Wait() + var exitStatus int // Initialised to zero value = 0 + if err != nil { + if e2, ok := err.(*exec.ExitError); ok { + if s, ok := e2.Sys().(syscall.WaitStatus); ok { + exitStatus = s.ExitStatus() + } else { + err = errors.New("Unimplemented for system where exec.ExitError.Sys() is not syscall.WaitStatus.") + } + } + } else { + 
exitStatus = 0 + } + L.Push(LNumber(exitStatus)) + return 1 + } + +errreturn: + L.RaiseError(err.Error()) + return 0 +} + +func fileFlushAux(L *LState, file *lFile) int { + if n := fileIsWritable(L, file); n != 0 { + return n + } + errorIfFileIsClosed(L, file) + + if bwriter, ok := file.writer.(*bufio.Writer); ok { + if err := bwriter.Flush(); err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } + } + L.Push(LTrue) + return 1 +} + +func fileReadAux(L *LState, file *lFile, idx int) int { + if n := fileIsReadable(L, file); n != 0 { + return n + } + errorIfFileIsClosed(L, file) + if L.GetTop() == idx-1 { + L.Push(LString("*l")) + } + var err error + top := L.GetTop() + for i := idx; i <= top; i++ { + switch lv := L.Get(i).(type) { + case LNumber: + size := int64(lv) + if size == 0 { + _, err = file.reader.ReadByte() + if err == io.EOF { + L.Push(LNil) + goto normalreturn + } + file.reader.UnreadByte() + } + var buf []byte + var iseof bool + buf, err, iseof = readBufioSize(file.reader, size) + if iseof { + L.Push(LNil) + goto normalreturn + } + if err != nil { + goto errreturn + } + L.Push(LString(string(buf))) + case LString: + options := L.CheckString(i) + if len(options) > 0 && options[0] != '*' { + L.ArgError(2, "invalid options:"+options) + } + for _, opt := range options[1:] { + switch opt { + case 'n': + var v LNumber + _, err = fmt.Fscanf(file.reader, LNumberScanFormat, &v) + if err == io.EOF { + L.Push(LNil) + goto normalreturn + } + if err != nil { + goto errreturn + } + L.Push(v) + case 'a': + var buf []byte + buf, err = ioutil.ReadAll(file.reader) + if err == io.EOF { + L.Push(LString("")) + goto normalreturn + } + if err != nil { + goto errreturn + } + L.Push(LString(string(buf))) + case 'l': + var buf []byte + var iseof bool + buf, err, iseof = readBufioLine(file.reader) + if iseof { + L.Push(LNil) + goto normalreturn + } + if err != nil { + goto errreturn + } + L.Push(LString(string(buf))) + default: + L.ArgError(2, "invalid 
options:"+string(opt)) + } + } + } + } +normalreturn: + return L.GetTop() - top + +errreturn: + L.RaiseError(err.Error()) + //L.Push(LNil) + //L.Push(LString(err.Error())) + return 2 +} + +var fileSeekOptions = []string{"set", "cur", "end"} + +func fileSeek(L *LState) int { + file := checkFile(L) + if file.Type() != lFileFile { + L.Push(LNil) + L.Push(LString("can not seek a process.")) + return 2 + } + + top := L.GetTop() + if top == 1 { + L.Push(LString("cur")) + L.Push(LNumber(0)) + } else if top == 2 { + L.Push(LNumber(0)) + } + + var pos int64 + var err error + + err = file.AbandonReadBuffer() + if err != nil { + goto errreturn + } + + pos, err = file.fp.Seek(L.CheckInt64(3), L.CheckOption(2, fileSeekOptions)) + if err != nil { + goto errreturn + } + + L.Push(LNumber(pos)) + return 1 + +errreturn: + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 +} + +func fileWrite(L *LState) int { + return fileWriteAux(L, checkFile(L), 2) +} + +func fileClose(L *LState) int { + return fileCloseAux(L, checkFile(L)) +} + +func fileFlush(L *LState) int { + return fileFlushAux(L, checkFile(L)) +} + +func fileLinesIter(L *LState) int { + var file *lFile + if ud, ok := L.Get(1).(*LUserData); ok { + file = ud.Value.(*lFile) + } else { + file = L.Get(UpvalueIndex(2)).(*LUserData).Value.(*lFile) + } + buf, _, err := file.reader.ReadLine() + if err != nil { + if err == io.EOF { + L.Push(LNil) + return 1 + } + L.RaiseError(err.Error()) + } + L.Push(LString(string(buf))) + return 1 +} + +func fileLines(L *LState) int { + file := checkFile(L) + ud := L.CheckUserData(1) + if n := fileIsReadable(L, file); n != 0 { + return 0 + } + L.Push(L.NewClosure(fileLinesIter, L.Get(UpvalueIndex(1)), ud)) + return 1 +} + +func fileRead(L *LState) int { + return fileReadAux(L, checkFile(L), 2) +} + +var filebufOptions = []string{"no", "full"} + +func fileSetVBuf(L *LState) int { + var err error + var writer io.Writer + file := checkFile(L) + if n := fileIsWritable(L, file); n != 0 { + return n 
+ } + switch filebufOptions[L.CheckOption(2, filebufOptions)] { + case "no": + switch file.Type() { + case lFileFile: + file.writer = file.fp + case lFileProcess: + file.writer, err = file.pp.StdinPipe() + if err != nil { + goto errreturn + } + } + case "full", "line": // TODO line buffer not supported + bufsize := L.OptInt(3, fileDefaultWriteBuffer) + switch file.Type() { + case lFileFile: + file.writer = bufio.NewWriterSize(file.fp, bufsize) + case lFileProcess: + writer, err = file.pp.StdinPipe() + if err != nil { + goto errreturn + } + file.writer = bufio.NewWriterSize(writer, bufsize) + } + } + L.Push(LTrue) + return 1 +errreturn: + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 +} + +func ioInput(L *LState) int { + if L.GetTop() == 0 { + L.Push(fileDefIn(L)) + return 1 + } + switch lv := L.Get(1).(type) { + case LString: + file, err := newFile(L, nil, string(lv), os.O_RDONLY, 0600, false, true) + if err != nil { + L.RaiseError(err.Error()) + } + L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefInIndex, file) + L.Push(file) + return 1 + case *LUserData: + if _, ok := lv.Value.(*lFile); ok { + L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefInIndex, lv) + L.Push(lv) + return 1 + } + + } + L.ArgError(1, "string or file expedted, but got "+L.Get(1).Type().String()) + return 0 +} + +func ioClose(L *LState) int { + if L.GetTop() == 0 { + return fileCloseAux(L, fileDefOut(L).Value.(*lFile)) + } + return fileClose(L) +} + +func ioFlush(L *LState) int { + return fileFlushAux(L, fileDefOut(L).Value.(*lFile)) +} + +func ioLinesIter(L *LState) int { + var file *lFile + toclose := false + if ud, ok := L.Get(1).(*LUserData); ok { + file = ud.Value.(*lFile) + } else { + file = L.Get(UpvalueIndex(2)).(*LUserData).Value.(*lFile) + toclose = true + } + buf, _, err := file.reader.ReadLine() + if err != nil { + if err == io.EOF { + if toclose { + fileCloseAux(L, file) + } + L.Push(LNil) + return 1 + } + L.RaiseError(err.Error()) + } + L.Push(LString(string(buf))) + 
return 1 +} + +func ioLines(L *LState) int { + if L.GetTop() == 0 { + L.Push(L.Get(UpvalueIndex(2))) + L.Push(fileDefIn(L)) + return 2 + } + + path := L.CheckString(1) + ud, err := newFile(L, nil, path, os.O_RDONLY, os.FileMode(0600), false, true) + if err != nil { + return 0 + } + L.Push(L.NewClosure(ioLinesIter, L.Get(UpvalueIndex(1)), ud)) + return 1 +} + +var ioOpenOpions = []string{"r", "rb", "w", "wb", "a", "ab", "r+", "rb+", "w+", "wb+", "a+", "ab+"} + +func ioOpenFile(L *LState) int { + path := L.CheckString(1) + if L.GetTop() == 1 { + L.Push(LString("r")) + } + mode := os.O_RDONLY + perm := 0600 + writable := true + readable := true + switch ioOpenOpions[L.CheckOption(2, ioOpenOpions)] { + case "r", "rb": + mode = os.O_RDONLY + writable = false + case "w", "wb": + mode = os.O_WRONLY | os.O_CREATE + readable = false + case "a", "ab": + mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE + case "r+", "rb+": + mode = os.O_RDWR + case "w+", "wb+": + mode = os.O_RDWR | os.O_TRUNC | os.O_CREATE + case "a+", "ab+": + mode = os.O_APPEND | os.O_RDWR | os.O_CREATE + } + file, err := newFile(L, nil, path, mode, os.FileMode(perm), writable, readable) + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + L.Push(LNumber(1)) // C-Lua compatibility: Original Lua pushes errno to the stack + return 3 + } + L.Push(file) + return 1 + +} + +var ioPopenOptions = []string{"r", "w"} + +func ioPopen(L *LState) int { + cmd := L.CheckString(1) + if L.GetTop() == 1 { + L.Push(LString("r")) + } + var file *LUserData + var err error + + switch ioPopenOptions[L.CheckOption(2, ioPopenOptions)] { + case "r": + file, err = newProcess(L, cmd, false, true) + case "w": + file, err = newProcess(L, cmd, true, false) + } + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } + L.Push(file) + return 1 +} + +func ioRead(L *LState) int { + return fileReadAux(L, fileDefIn(L).Value.(*lFile), 1) +} + +func ioType(L *LState) int { + ud, udok := L.Get(1).(*LUserData) + if 
!udok { + L.Push(LNil) + return 1 + } + file, ok := ud.Value.(*lFile) + if !ok { + L.Push(LNil) + return 1 + } + if file.closed { + L.Push(LString("closed file")) + return 1 + } + L.Push(LString("file")) + return 1 +} + +func ioTmpFile(L *LState) int { + file, err := ioutil.TempFile("", "") + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } + L.G.tempFiles = append(L.G.tempFiles, file) + ud, _ := newFile(L, file, "", 0, os.FileMode(0), true, true) + L.Push(ud) + return 1 +} + +func ioOutput(L *LState) int { + if L.GetTop() == 0 { + L.Push(fileDefOut(L)) + return 1 + } + switch lv := L.Get(1).(type) { + case LString: + file, err := newFile(L, nil, string(lv), os.O_WRONLY|os.O_CREATE, 0600, true, false) + if err != nil { + L.RaiseError(err.Error()) + } + L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefOutIndex, file) + L.Push(file) + return 1 + case *LUserData: + if _, ok := lv.Value.(*lFile); ok { + L.Get(UpvalueIndex(1)).(*LTable).RawSetInt(fileDefOutIndex, lv) + L.Push(lv) + return 1 + } + + } + L.ArgError(1, "string or file expedted, but got "+L.Get(1).Type().String()) + return 0 +} + +func ioWrite(L *LState) int { + return fileWriteAux(L, fileDefOut(L).Value.(*lFile), 1) +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/linit.go b/vendor/github.com/yuin/gopher-lua/linit.go new file mode 100644 index 00000000000..cd96d660151 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/linit.go @@ -0,0 +1,54 @@ +package lua + +const ( + // BaseLibName is here for consistency; the base functions have no namespace/library. + BaseLibName = "" + // LoadLibName is here for consistency; the loading system has no namespace/library. + LoadLibName = "package" + // TabLibName is the name of the table Library. + TabLibName = "table" + // IoLibName is the name of the io Library. + IoLibName = "io" + // OsLibName is the name of the os Library. + OsLibName = "os" + // StringLibName is the name of the string Library. 
+ StringLibName = "string" + // MathLibName is the name of the math Library. + MathLibName = "math" + // DebugLibName is the name of the debug Library. + DebugLibName = "debug" + // ChannelLibName is the name of the channel Library. + ChannelLibName = "channel" + // CoroutineLibName is the name of the coroutine Library. + CoroutineLibName = "coroutine" +) + +type luaLib struct { + libName string + libFunc LGFunction +} + +var luaLibs = []luaLib{ + luaLib{LoadLibName, OpenPackage}, + luaLib{BaseLibName, OpenBase}, + luaLib{TabLibName, OpenTable}, + luaLib{IoLibName, OpenIo}, + luaLib{OsLibName, OpenOs}, + luaLib{StringLibName, OpenString}, + luaLib{MathLibName, OpenMath}, + luaLib{DebugLibName, OpenDebug}, + luaLib{ChannelLibName, OpenChannel}, + luaLib{CoroutineLibName, OpenCoroutine}, +} + +// OpenLibs loads the built-in libraries. It is equivalent to running OpenLoad, +// then OpenBase, then iterating over the other OpenXXX functions in any order. +func (ls *LState) OpenLibs() { + // NB: Map iteration order in Go is deliberately randomised, so must open Load/Base + // prior to iterating. 
+ for _, lib := range luaLibs { + ls.Push(ls.NewFunction(lib.libFunc)) + ls.Push(LString(lib.libName)) + ls.Call(1, 0) + } +} diff --git a/vendor/github.com/yuin/gopher-lua/loadlib.go b/vendor/github.com/yuin/gopher-lua/loadlib.go new file mode 100644 index 00000000000..dc67e20d40e --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/loadlib.go @@ -0,0 +1,125 @@ +package lua + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +/* load lib {{{ */ + +var loLoaders = []LGFunction{loLoaderPreload, loLoaderLua} + +func loGetPath(env string, defpath string) string { + path := os.Getenv(env) + if len(path) == 0 { + path = defpath + } + path = strings.Replace(path, ";;", ";"+defpath+";", -1) + if os.PathSeparator != '/' { + dir, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + panic(err) + } + path = strings.Replace(path, "!", dir, -1) + } + return path +} + +func loFindFile(L *LState, name, pname string) (string, string) { + name = strings.Replace(name, ".", string(os.PathSeparator), -1) + lv := L.GetField(L.GetField(L.Get(EnvironIndex), "package"), pname) + path, ok := lv.(LString) + if !ok { + L.RaiseError("package.%s must be a string", pname) + } + messages := []string{} + for _, pattern := range strings.Split(string(path), ";") { + luapath := strings.Replace(pattern, "?", name, -1) + if _, err := os.Stat(luapath); err == nil { + return luapath, "" + } else { + messages = append(messages, err.Error()) + } + } + return "", strings.Join(messages, "\n\t") +} + +func OpenPackage(L *LState) int { + packagemod := L.RegisterModule(LoadLibName, loFuncs) + + L.SetField(packagemod, "preload", L.NewTable()) + + loaders := L.CreateTable(len(loLoaders), 0) + for i, loader := range loLoaders { + L.RawSetInt(loaders, i+1, L.NewFunction(loader)) + } + L.SetField(packagemod, "loaders", loaders) + L.SetField(L.Get(RegistryIndex), "_LOADERS", loaders) + + loaded := L.NewTable() + L.SetField(packagemod, "loaded", loaded) + L.SetField(L.Get(RegistryIndex), 
"_LOADED", loaded) + + L.SetField(packagemod, "path", LString(loGetPath(LuaPath, LuaPathDefault))) + L.SetField(packagemod, "cpath", LString("")) + + L.Push(packagemod) + return 1 +} + +var loFuncs = map[string]LGFunction{ + "loadlib": loLoadLib, + "seeall": loSeeAll, +} + +func loLoaderPreload(L *LState) int { + name := L.CheckString(1) + preload := L.GetField(L.GetField(L.Get(EnvironIndex), "package"), "preload") + if _, ok := preload.(*LTable); !ok { + L.RaiseError("package.preload must be a table") + } + lv := L.GetField(preload, name) + if lv == LNil { + L.Push(LString(fmt.Sprintf("no field package.preload['%s']", name))) + return 1 + } + L.Push(lv) + return 1 +} + +func loLoaderLua(L *LState) int { + name := L.CheckString(1) + path, msg := loFindFile(L, name, "path") + if len(path) == 0 { + L.Push(LString(msg)) + return 1 + } + fn, err1 := L.LoadFile(path) + if err1 != nil { + L.RaiseError(err1.Error()) + } + L.Push(fn) + return 1 +} + +func loLoadLib(L *LState) int { + L.RaiseError("loadlib is not supported") + return 0 +} + +func loSeeAll(L *LState) int { + mod := L.CheckTable(1) + mt := L.GetMetatable(mod) + if mt == LNil { + mt = L.CreateTable(0, 1) + L.SetMetatable(mod, mt) + } + L.SetField(mt, "__index", L.Get(GlobalsIndex)) + return 0 +} + +/* }}} */ + +// diff --git a/vendor/github.com/yuin/gopher-lua/mathlib.go b/vendor/github.com/yuin/gopher-lua/mathlib.go new file mode 100644 index 00000000000..0771a48f5a7 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/mathlib.go @@ -0,0 +1,231 @@ +package lua + +import ( + "math" + "math/rand" +) + +func OpenMath(L *LState) int { + mod := L.RegisterModule(MathLibName, mathFuncs).(*LTable) + mod.RawSetString("pi", LNumber(math.Pi)) + mod.RawSetString("huge", LNumber(math.MaxFloat64)) + L.Push(mod) + return 1 +} + +var mathFuncs = map[string]LGFunction{ + "abs": mathAbs, + "acos": mathAcos, + "asin": mathAsin, + "atan": mathAtan, + "atan2": mathAtan2, + "ceil": mathCeil, + "cos": mathCos, + "cosh": mathCosh, 
+ "deg": mathDeg, + "exp": mathExp, + "floor": mathFloor, + "fmod": mathFmod, + "frexp": mathFrexp, + "ldexp": mathLdexp, + "log": mathLog, + "log10": mathLog10, + "max": mathMax, + "min": mathMin, + "mod": mathMod, + "modf": mathModf, + "pow": mathPow, + "rad": mathRad, + "random": mathRandom, + "randomseed": mathRandomseed, + "sin": mathSin, + "sinh": mathSinh, + "sqrt": mathSqrt, + "tan": mathTan, + "tanh": mathTanh, +} + +func mathAbs(L *LState) int { + L.Push(LNumber(math.Abs(float64(L.CheckNumber(1))))) + return 1 +} + +func mathAcos(L *LState) int { + L.Push(LNumber(math.Acos(float64(L.CheckNumber(1))))) + return 1 +} + +func mathAsin(L *LState) int { + L.Push(LNumber(math.Asin(float64(L.CheckNumber(1))))) + return 1 +} + +func mathAtan(L *LState) int { + L.Push(LNumber(math.Atan(float64(L.CheckNumber(1))))) + return 1 +} + +func mathAtan2(L *LState) int { + L.Push(LNumber(math.Atan2(float64(L.CheckNumber(1)), float64(L.CheckNumber(2))))) + return 1 +} + +func mathCeil(L *LState) int { + L.Push(LNumber(math.Ceil(float64(L.CheckNumber(1))))) + return 1 +} + +func mathCos(L *LState) int { + L.Push(LNumber(math.Cos(float64(L.CheckNumber(1))))) + return 1 +} + +func mathCosh(L *LState) int { + L.Push(LNumber(math.Cosh(float64(L.CheckNumber(1))))) + return 1 +} + +func mathDeg(L *LState) int { + L.Push(LNumber(float64(L.CheckNumber(1)) * 180 / math.Pi)) + return 1 +} + +func mathExp(L *LState) int { + L.Push(LNumber(math.Exp(float64(L.CheckNumber(1))))) + return 1 +} + +func mathFloor(L *LState) int { + L.Push(LNumber(math.Floor(float64(L.CheckNumber(1))))) + return 1 +} + +func mathFmod(L *LState) int { + L.Push(LNumber(math.Mod(float64(L.CheckNumber(1)), float64(L.CheckNumber(2))))) + return 1 +} + +func mathFrexp(L *LState) int { + v1, v2 := math.Frexp(float64(L.CheckNumber(1))) + L.Push(LNumber(v1)) + L.Push(LNumber(v2)) + return 2 +} + +func mathLdexp(L *LState) int { + L.Push(LNumber(math.Ldexp(float64(L.CheckNumber(1)), L.CheckInt(2)))) + return 1 +} + 
+func mathLog(L *LState) int { + L.Push(LNumber(math.Log(float64(L.CheckNumber(1))))) + return 1 +} + +func mathLog10(L *LState) int { + L.Push(LNumber(math.Log10(float64(L.CheckNumber(1))))) + return 1 +} + +func mathMax(L *LState) int { + if L.GetTop() == 0 { + L.RaiseError("wrong number of arguments") + } + max := L.CheckNumber(1) + top := L.GetTop() + for i := 2; i <= top; i++ { + v := L.CheckNumber(i) + if v > max { + max = v + } + } + L.Push(max) + return 1 +} + +func mathMin(L *LState) int { + if L.GetTop() == 0 { + L.RaiseError("wrong number of arguments") + } + min := L.CheckNumber(1) + top := L.GetTop() + for i := 2; i <= top; i++ { + v := L.CheckNumber(i) + if v < min { + min = v + } + } + L.Push(min) + return 1 +} + +func mathMod(L *LState) int { + lhs := L.CheckNumber(1) + rhs := L.CheckNumber(2) + L.Push(luaModulo(lhs, rhs)) + return 1 +} + +func mathModf(L *LState) int { + v1, v2 := math.Modf(float64(L.CheckNumber(1))) + L.Push(LNumber(v1)) + L.Push(LNumber(v2)) + return 2 +} + +func mathPow(L *LState) int { + L.Push(LNumber(math.Pow(float64(L.CheckNumber(1)), float64(L.CheckNumber(2))))) + return 1 +} + +func mathRad(L *LState) int { + L.Push(LNumber(float64(L.CheckNumber(1)) * math.Pi / 180)) + return 1 +} + +func mathRandom(L *LState) int { + switch L.GetTop() { + case 0: + L.Push(LNumber(rand.Float64())) + case 1: + n := L.CheckInt(1) + L.Push(LNumber(rand.Intn(n-1) + 1)) + default: + min := L.CheckInt(1) + max := L.CheckInt(2) + 1 + L.Push(LNumber(rand.Intn(max-min) + min)) + } + return 1 +} + +func mathRandomseed(L *LState) int { + rand.Seed(L.CheckInt64(1)) + return 0 +} + +func mathSin(L *LState) int { + L.Push(LNumber(math.Sin(float64(L.CheckNumber(1))))) + return 1 +} + +func mathSinh(L *LState) int { + L.Push(LNumber(math.Sinh(float64(L.CheckNumber(1))))) + return 1 +} + +func mathSqrt(L *LState) int { + L.Push(LNumber(math.Sqrt(float64(L.CheckNumber(1))))) + return 1 +} + +func mathTan(L *LState) int { + 
L.Push(LNumber(math.Tan(float64(L.CheckNumber(1))))) + return 1 +} + +func mathTanh(L *LState) int { + L.Push(LNumber(math.Tanh(float64(L.CheckNumber(1))))) + return 1 +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/opcode.go b/vendor/github.com/yuin/gopher-lua/opcode.go new file mode 100644 index 00000000000..91fff1c9b41 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/opcode.go @@ -0,0 +1,371 @@ +package lua + +import ( + "fmt" +) + +/* + gopherlua uses Lua 5.1.4's opcodes. + Lua 5.1.4 opcodes layout: + + instruction = 32bit(fixed length) + + +---------------------------------------------+ + |0-5(6bits)|6-13(8bit)|14-22(9bit)|23-31(9bit)| + |==========+==========+===========+===========| + | opcode | A | C | B | + |----------+----------+-----------+-----------| + | opcode | A | Bx(unsigned) | + |----------+----------+-----------+-----------| + | opcode | A | sBx(signed) | + +---------------------------------------------+ +*/ + +const opInvalidInstruction = ^uint32(0) + +const opSizeCode = 6 +const opSizeA = 8 +const opSizeB = 9 +const opSizeC = 9 +const opSizeBx = 18 +const opSizesBx = 18 + +const opMaxArgsA = (1 << opSizeA) - 1 +const opMaxArgsB = (1 << opSizeB) - 1 +const opMaxArgsC = (1 << opSizeC) - 1 +const opMaxArgBx = (1 << opSizeBx) - 1 +const opMaxArgSbx = opMaxArgBx >> 1 + +const ( + OP_MOVE int = iota /* A B R(A) := R(B) */ + OP_MOVEN /* A B R(A) := R(B); followed by R(C) MOVE ops */ + OP_LOADK /* A Bx R(A) := Kst(Bx) */ + OP_LOADBOOL /* A B C R(A) := (Bool)B; if (C) pc++ */ + OP_LOADNIL /* A B R(A) := ... 
:= R(B) := nil */ + OP_GETUPVAL /* A B R(A) := UpValue[B] */ + + OP_GETGLOBAL /* A Bx R(A) := Gbl[Kst(Bx)] */ + OP_GETTABLE /* A B C R(A) := R(B)[RK(C)] */ + OP_GETTABLEKS /* A B C R(A) := R(B)[RK(C)] ; RK(C) is constant string */ + + OP_SETGLOBAL /* A Bx Gbl[Kst(Bx)] := R(A) */ + OP_SETUPVAL /* A B UpValue[B] := R(A) */ + OP_SETTABLE /* A B C R(A)[RK(B)] := RK(C) */ + OP_SETTABLEKS /* A B C R(A)[RK(B)] := RK(C) ; RK(B) is constant string */ + + OP_NEWTABLE /* A B C R(A) := {} (size = BC) */ + + OP_SELF /* A B C R(A+1) := R(B); R(A) := R(B)[RK(C)] */ + + OP_ADD /* A B C R(A) := RK(B) + RK(C) */ + OP_SUB /* A B C R(A) := RK(B) - RK(C) */ + OP_MUL /* A B C R(A) := RK(B) * RK(C) */ + OP_DIV /* A B C R(A) := RK(B) / RK(C) */ + OP_MOD /* A B C R(A) := RK(B) % RK(C) */ + OP_POW /* A B C R(A) := RK(B) ^ RK(C) */ + OP_UNM /* A B R(A) := -R(B) */ + OP_NOT /* A B R(A) := not R(B) */ + OP_LEN /* A B R(A) := length of R(B) */ + + OP_CONCAT /* A B C R(A) := R(B).. ... ..R(C) */ + + OP_JMP /* sBx pc+=sBx */ + + OP_EQ /* A B C if ((RK(B) == RK(C)) ~= A) then pc++ */ + OP_LT /* A B C if ((RK(B) < RK(C)) ~= A) then pc++ */ + OP_LE /* A B C if ((RK(B) <= RK(C)) ~= A) then pc++ */ + + OP_TEST /* A C if not (R(A) <=> C) then pc++ */ + OP_TESTSET /* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */ + + OP_CALL /* A B C R(A) ... R(A+C-2) := R(A)(R(A+1) ... R(A+B-1)) */ + OP_TAILCALL /* A B C return R(A)(R(A+1) ... R(A+B-1)) */ + OP_RETURN /* A B return R(A) ... R(A+B-2) (see note) */ + + OP_FORLOOP /* A sBx R(A)+=R(A+2); + if R(A) =) R(A)*/ + OP_CLOSURE /* A Bx R(A) := closure(KPROTO[Bx] R(A) ... R(A+n)) */ + + OP_VARARG /* A B R(A) R(A+1) ... 
R(A+B-1) = vararg */ + + OP_NOP /* NOP */ +) +const opCodeMax = OP_NOP + +type opArgMode int + +const ( + opArgModeN opArgMode = iota + opArgModeU + opArgModeR + opArgModeK +) + +type opType int + +const ( + opTypeABC = iota + opTypeABx + opTypeASbx +) + +type opProp struct { + Name string + IsTest bool + SetRegA bool + ModeArgB opArgMode + ModeArgC opArgMode + Type opType +} + +var opProps = []opProp{ + opProp{"MOVE", false, true, opArgModeR, opArgModeN, opTypeABC}, + opProp{"MOVEN", false, true, opArgModeR, opArgModeN, opTypeABC}, + opProp{"LOADK", false, true, opArgModeK, opArgModeN, opTypeABx}, + opProp{"LOADBOOL", false, true, opArgModeU, opArgModeU, opTypeABC}, + opProp{"LOADNIL", false, true, opArgModeR, opArgModeN, opTypeABC}, + opProp{"GETUPVAL", false, true, opArgModeU, opArgModeN, opTypeABC}, + opProp{"GETGLOBAL", false, true, opArgModeK, opArgModeN, opTypeABx}, + opProp{"GETTABLE", false, true, opArgModeR, opArgModeK, opTypeABC}, + opProp{"GETTABLEKS", false, true, opArgModeR, opArgModeK, opTypeABC}, + opProp{"SETGLOBAL", false, false, opArgModeK, opArgModeN, opTypeABx}, + opProp{"SETUPVAL", false, false, opArgModeU, opArgModeN, opTypeABC}, + opProp{"SETTABLE", false, false, opArgModeK, opArgModeK, opTypeABC}, + opProp{"SETTABLEKS", false, false, opArgModeK, opArgModeK, opTypeABC}, + opProp{"NEWTABLE", false, true, opArgModeU, opArgModeU, opTypeABC}, + opProp{"SELF", false, true, opArgModeR, opArgModeK, opTypeABC}, + opProp{"ADD", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"SUB", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"MUL", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"DIV", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"MOD", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"POW", false, true, opArgModeK, opArgModeK, opTypeABC}, + opProp{"UNM", false, true, opArgModeR, opArgModeN, opTypeABC}, + opProp{"NOT", false, true, opArgModeR, opArgModeN, opTypeABC}, + opProp{"LEN", false, true, 
opArgModeR, opArgModeN, opTypeABC}, + opProp{"CONCAT", false, true, opArgModeR, opArgModeR, opTypeABC}, + opProp{"JMP", false, false, opArgModeR, opArgModeN, opTypeASbx}, + opProp{"EQ", true, false, opArgModeK, opArgModeK, opTypeABC}, + opProp{"LT", true, false, opArgModeK, opArgModeK, opTypeABC}, + opProp{"LE", true, false, opArgModeK, opArgModeK, opTypeABC}, + opProp{"TEST", true, true, opArgModeR, opArgModeU, opTypeABC}, + opProp{"TESTSET", true, true, opArgModeR, opArgModeU, opTypeABC}, + opProp{"CALL", false, true, opArgModeU, opArgModeU, opTypeABC}, + opProp{"TAILCALL", false, true, opArgModeU, opArgModeU, opTypeABC}, + opProp{"RETURN", false, false, opArgModeU, opArgModeN, opTypeABC}, + opProp{"FORLOOP", false, true, opArgModeR, opArgModeN, opTypeASbx}, + opProp{"FORPREP", false, true, opArgModeR, opArgModeN, opTypeASbx}, + opProp{"TFORLOOP", true, false, opArgModeN, opArgModeU, opTypeABC}, + opProp{"SETLIST", false, false, opArgModeU, opArgModeU, opTypeABC}, + opProp{"CLOSE", false, false, opArgModeN, opArgModeN, opTypeABC}, + opProp{"CLOSURE", false, true, opArgModeU, opArgModeN, opTypeABx}, + opProp{"VARARG", false, true, opArgModeU, opArgModeN, opTypeABC}, + opProp{"NOP", false, false, opArgModeR, opArgModeN, opTypeASbx}, +} + +func opGetOpCode(inst uint32) int { + return int(inst >> 26) +} + +func opSetOpCode(inst *uint32, opcode int) { + *inst = (*inst & 0x3ffffff) | uint32(opcode<<26) +} + +func opGetArgA(inst uint32) int { + return int(inst>>18) & 0xff +} + +func opSetArgA(inst *uint32, arg int) { + *inst = (*inst & 0xfc03ffff) | uint32((arg&0xff)<<18) +} + +func opGetArgB(inst uint32) int { + return int(inst & 0x1ff) +} + +func opSetArgB(inst *uint32, arg int) { + *inst = (*inst & 0xfffffe00) | uint32(arg&0x1ff) +} + +func opGetArgC(inst uint32) int { + return int(inst>>9) & 0x1ff +} + +func opSetArgC(inst *uint32, arg int) { + *inst = (*inst & 0xfffc01ff) | uint32((arg&0x1ff)<<9) +} + +func opGetArgBx(inst uint32) int { + return int(inst & 0x3ffff) 
+} + +func opSetArgBx(inst *uint32, arg int) { + *inst = (*inst & 0xfffc0000) | uint32(arg&0x3ffff) +} + +func opGetArgSbx(inst uint32) int { + return opGetArgBx(inst) - opMaxArgSbx +} + +func opSetArgSbx(inst *uint32, arg int) { + opSetArgBx(inst, arg+opMaxArgSbx) +} + +func opCreateABC(op int, a int, b int, c int) uint32 { + var inst uint32 = 0 + opSetOpCode(&inst, op) + opSetArgA(&inst, a) + opSetArgB(&inst, b) + opSetArgC(&inst, c) + return inst +} + +func opCreateABx(op int, a int, bx int) uint32 { + var inst uint32 = 0 + opSetOpCode(&inst, op) + opSetArgA(&inst, a) + opSetArgBx(&inst, bx) + return inst +} + +func opCreateASbx(op int, a int, sbx int) uint32 { + var inst uint32 = 0 + opSetOpCode(&inst, op) + opSetArgA(&inst, a) + opSetArgSbx(&inst, sbx) + return inst +} + +const opBitRk = 1 << (opSizeB - 1) +const opMaxIndexRk = opBitRk - 1 + +func opIsK(value int) bool { + return bool((value & opBitRk) != 0) +} + +func opIndexK(value int) int { + return value & ^opBitRk +} + +func opRkAsk(value int) int { + return value | opBitRk +} + +func opToString(inst uint32) string { + op := opGetOpCode(inst) + if op > opCodeMax { + return "" + } + prop := &(opProps[op]) + + arga := opGetArgA(inst) + argb := opGetArgB(inst) + argc := opGetArgC(inst) + argbx := opGetArgBx(inst) + argsbx := opGetArgSbx(inst) + + buf := "" + switch prop.Type { + case opTypeABC: + buf = fmt.Sprintf("%s | %d, %d, %d", prop.Name, arga, argb, argc) + case opTypeABx: + buf = fmt.Sprintf("%s | %d, %d", prop.Name, arga, argbx) + case opTypeASbx: + buf = fmt.Sprintf("%s | %d, %d", prop.Name, arga, argsbx) + } + + switch op { + case OP_MOVE: + buf += fmt.Sprintf("; R(%v) := R(%v)", arga, argb) + case OP_MOVEN: + buf += fmt.Sprintf("; R(%v) := R(%v); followed by %v MOVE ops", arga, argb, argc) + case OP_LOADK: + buf += fmt.Sprintf("; R(%v) := Kst(%v)", arga, argbx) + case OP_LOADBOOL: + buf += fmt.Sprintf("; R(%v) := (Bool)%v; if (%v) pc++", arga, argb, argc) + case OP_LOADNIL: + buf += 
fmt.Sprintf("; R(%v) := ... := R(%v) := nil", arga, argb) + case OP_GETUPVAL: + buf += fmt.Sprintf("; R(%v) := UpValue[%v]", arga, argb) + case OP_GETGLOBAL: + buf += fmt.Sprintf("; R(%v) := Gbl[Kst(%v)]", arga, argbx) + case OP_GETTABLE: + buf += fmt.Sprintf("; R(%v) := R(%v)[RK(%v)]", arga, argb, argc) + case OP_GETTABLEKS: + buf += fmt.Sprintf("; R(%v) := R(%v)[RK(%v)] ; RK(%v) is constant string", arga, argb, argc, argc) + case OP_SETGLOBAL: + buf += fmt.Sprintf("; Gbl[Kst(%v)] := R(%v)", argbx, arga) + case OP_SETUPVAL: + buf += fmt.Sprintf("; UpValue[%v] := R(%v)", argb, arga) + case OP_SETTABLE: + buf += fmt.Sprintf("; R(%v)[RK(%v)] := RK(%v)", arga, argb, argc) + case OP_SETTABLEKS: + buf += fmt.Sprintf("; R(%v)[RK(%v)] := RK(%v) ; RK(%v) is constant string", arga, argb, argc, argb) + case OP_NEWTABLE: + buf += fmt.Sprintf("; R(%v) := {} (size = BC)", arga) + case OP_SELF: + buf += fmt.Sprintf("; R(%v+1) := R(%v); R(%v) := R(%v)[RK(%v)]", arga, argb, arga, argb, argc) + case OP_ADD: + buf += fmt.Sprintf("; R(%v) := RK(%v) + RK(%v)", arga, argb, argc) + case OP_SUB: + buf += fmt.Sprintf("; R(%v) := RK(%v) - RK(%v)", arga, argb, argc) + case OP_MUL: + buf += fmt.Sprintf("; R(%v) := RK(%v) * RK(%v)", arga, argb, argc) + case OP_DIV: + buf += fmt.Sprintf("; R(%v) := RK(%v) / RK(%v)", arga, argb, argc) + case OP_MOD: + buf += fmt.Sprintf("; R(%v) := RK(%v) %% RK(%v)", arga, argb, argc) + case OP_POW: + buf += fmt.Sprintf("; R(%v) := RK(%v) ^ RK(%v)", arga, argb, argc) + case OP_UNM: + buf += fmt.Sprintf("; R(%v) := -R(%v)", arga, argb) + case OP_NOT: + buf += fmt.Sprintf("; R(%v) := not R(%v)", arga, argb) + case OP_LEN: + buf += fmt.Sprintf("; R(%v) := length of R(%v)", arga, argb) + case OP_CONCAT: + buf += fmt.Sprintf("; R(%v) := R(%v).. ... 
..R(%v)", arga, argb, argc) + case OP_JMP: + buf += fmt.Sprintf("; pc+=%v", argsbx) + case OP_EQ: + buf += fmt.Sprintf("; if ((RK(%v) == RK(%v)) ~= %v) then pc++", argb, argc, arga) + case OP_LT: + buf += fmt.Sprintf("; if ((RK(%v) < RK(%v)) ~= %v) then pc++", argb, argc, arga) + case OP_LE: + buf += fmt.Sprintf("; if ((RK(%v) <= RK(%v)) ~= %v) then pc++", argb, argc, arga) + case OP_TEST: + buf += fmt.Sprintf("; if not (R(%v) <=> %v) then pc++", arga, argc) + case OP_TESTSET: + buf += fmt.Sprintf("; if (R(%v) <=> %v) then R(%v) := R(%v) else pc++", argb, argc, arga, argb) + case OP_CALL: + buf += fmt.Sprintf("; R(%v) ... R(%v+%v-2) := R(%v)(R(%v+1) ... R(%v+%v-1))", arga, arga, argc, arga, arga, arga, argb) + case OP_TAILCALL: + buf += fmt.Sprintf("; return R(%v)(R(%v+1) ... R(%v+%v-1))", arga, arga, arga, argb) + case OP_RETURN: + buf += fmt.Sprintf("; return R(%v) ... R(%v+%v-2)", arga, arga, argb) + case OP_FORLOOP: + buf += fmt.Sprintf("; R(%v)+=R(%v+2); if R(%v) =) R(%v)", arga) + case OP_CLOSURE: + buf += fmt.Sprintf("; R(%v) := closure(KPROTO[%v] R(%v) ... R(%v+n))", arga, argbx, arga, arga) + case OP_VARARG: + buf += fmt.Sprintf("; R(%v) R(%v+1) ... 
R(%v+%v-1) = vararg", arga, arga, arga, argb) + case OP_NOP: + /* nothing to do */ + } + return buf +} diff --git a/vendor/github.com/yuin/gopher-lua/oslib.go b/vendor/github.com/yuin/gopher-lua/oslib.go new file mode 100644 index 00000000000..2e4bd90159c --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/oslib.go @@ -0,0 +1,206 @@ +package lua + +import ( + "io/ioutil" + "os" + "strings" + "time" +) + +var startedAt time.Time + +func init() { + startedAt = time.Now() +} + +func getIntField(L *LState, tb *LTable, key string, v int) int { + ret := tb.RawGetString(key) + if ln, ok := ret.(LNumber); ok { + return int(ln) + } + return v +} + +func getBoolField(L *LState, tb *LTable, key string, v bool) bool { + ret := tb.RawGetString(key) + if lb, ok := ret.(LBool); ok { + return bool(lb) + } + return v +} + +func OpenOs(L *LState) int { + osmod := L.RegisterModule(OsLibName, osFuncs) + L.Push(osmod) + return 1 +} + +var osFuncs = map[string]LGFunction{ + "clock": osClock, + "difftime": osDiffTime, + "execute": osExecute, + "exit": osExit, + "date": osDate, + "getenv": osGetEnv, + "remove": osRemove, + "rename": osRename, + "setenv": osSetEnv, + "setlocale": osSetLocale, + "time": osTime, + "tmpname": osTmpname, +} + +func osClock(L *LState) int { + L.Push(LNumber(float64(time.Now().Sub(startedAt)) / float64(time.Second))) + return 1 +} + +func osDiffTime(L *LState) int { + L.Push(LNumber(L.CheckInt64(1) - L.CheckInt64(2))) + return 1 +} + +func osExecute(L *LState) int { + var procAttr os.ProcAttr + procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr} + cmd, args := popenArgs(L.CheckString(1)) + args = append([]string{cmd}, args...) 
+ process, err := os.StartProcess(cmd, args, &procAttr) + if err != nil { + L.Push(LNumber(1)) + return 1 + } + + ps, err := process.Wait() + if err != nil || !ps.Success() { + L.Push(LNumber(1)) + return 1 + } + L.Push(LNumber(0)) + return 1 +} + +func osExit(L *LState) int { + L.Close() + os.Exit(L.OptInt(1, 0)) + return 1 +} + +func osDate(L *LState) int { + t := time.Now() + cfmt := "%c" + if L.GetTop() >= 1 { + cfmt = L.CheckString(1) + if strings.HasPrefix(cfmt, "!") { + t = time.Now().UTC() + cfmt = strings.TrimLeft(cfmt, "!") + } + if L.GetTop() >= 2 { + t = time.Unix(L.CheckInt64(2), 0) + } + if strings.HasPrefix(cfmt, "*t") { + ret := L.NewTable() + ret.RawSetString("year", LNumber(t.Year())) + ret.RawSetString("month", LNumber(t.Month())) + ret.RawSetString("day", LNumber(t.Day())) + ret.RawSetString("hour", LNumber(t.Hour())) + ret.RawSetString("min", LNumber(t.Minute())) + ret.RawSetString("sec", LNumber(t.Second())) + ret.RawSetString("wday", LNumber(t.Weekday())) + // TODO yday & dst + ret.RawSetString("yday", LNumber(0)) + ret.RawSetString("isdst", LFalse) + L.Push(ret) + return 1 + } + } + L.Push(LString(strftime(t, cfmt))) + return 1 +} + +func osGetEnv(L *LState) int { + v := os.Getenv(L.CheckString(1)) + if len(v) == 0 { + L.Push(LNil) + } else { + L.Push(LString(v)) + } + return 1 +} + +func osRemove(L *LState) int { + err := os.Remove(L.CheckString(1)) + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } else { + L.Push(LTrue) + return 1 + } +} + +func osRename(L *LState) int { + err := os.Rename(L.CheckString(1), L.CheckString(2)) + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } else { + L.Push(LTrue) + return 1 + } +} + +func osSetLocale(L *LState) int { + // setlocale is not supported + L.Push(LFalse) + return 1 +} + +func osSetEnv(L *LState) int { + err := os.Setenv(L.CheckString(1), L.CheckString(2)) + if err != nil { + L.Push(LNil) + L.Push(LString(err.Error())) + return 2 + } else 
{ + L.Push(LTrue) + return 1 + } +} + +func osTime(L *LState) int { + if L.GetTop() == 0 { + L.Push(LNumber(time.Now().Unix())) + } else { + tbl := L.CheckTable(1) + sec := getIntField(L, tbl, "sec", 0) + min := getIntField(L, tbl, "min", 0) + hour := getIntField(L, tbl, "hour", 12) + day := getIntField(L, tbl, "day", -1) + month := getIntField(L, tbl, "month", -1) + year := getIntField(L, tbl, "year", -1) + isdst := getBoolField(L, tbl, "isdst", false) + t := time.Date(year, time.Month(month), day, hour, min, sec, 0, time.Local) + // TODO dst + if false { + print(isdst) + } + L.Push(LNumber(t.Unix())) + } + return 1 +} + +func osTmpname(L *LState) int { + file, err := ioutil.TempFile("", "") + if err != nil { + L.RaiseError("unable to generate a unique filename") + } + file.Close() + os.Remove(file.Name()) // ignore errors + L.Push(LString(file.Name())) + return 1 +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/package.go b/vendor/github.com/yuin/gopher-lua/package.go new file mode 100644 index 00000000000..c6010ece869 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/package.go @@ -0,0 +1,7 @@ +// GopherLua: VM and compiler for Lua in Go +package lua + +const PackageName = "GopherLua" +const PackageVersion = "0.1" +const PackageAuthors = "Yusuke Inuzuka" +const PackageCopyRight = PackageName + " " + PackageVersion + " Copyright (C) 2015 " + PackageAuthors diff --git a/vendor/github.com/yuin/gopher-lua/parse/Makefile b/vendor/github.com/yuin/gopher-lua/parse/Makefile new file mode 100644 index 00000000000..b5b69096086 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/parse/Makefile @@ -0,0 +1,4 @@ +all : parser.go + +parser.go : parser.go.y + go tool yacc -o $@ parser.go.y; [ -f y.output ] && ( rm -f y.output ) diff --git a/vendor/github.com/yuin/gopher-lua/parse/lexer.go b/vendor/github.com/yuin/gopher-lua/parse/lexer.go new file mode 100644 index 00000000000..648ca87efb0 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/parse/lexer.go @@ -0,0 
+1,533 @@ +package parse + +import ( + "bufio" + "bytes" + "fmt" + "github.com/yuin/gopher-lua/ast" + "io" + "reflect" + "strconv" + "strings" +) + +const EOF = -1 +const whitespace1 = 1<<'\t' | 1<<'\r' | 1<<' ' +const whitespace2 = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' ' + +type Error struct { + Pos ast.Position + Message string + Token string +} + +func (e *Error) Error() string { + pos := e.Pos + if pos.Line == EOF { + return fmt.Sprintf("%v at EOF: %s\n", pos.Source, e.Message) + } else { + return fmt.Sprintf("%v line:%d(column:%d) near '%v': %s\n", pos.Source, pos.Line, pos.Column, e.Token, e.Message) + } +} + +func writeChar(buf *bytes.Buffer, c int) { buf.WriteByte(byte(c)) } + +func isDecimal(ch int) bool { return '0' <= ch && ch <= '9' } + +func isIdent(ch int, pos int) bool { + return ch == '_' || 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z' || isDecimal(ch) && pos > 0 +} + +func isDigit(ch int) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +type Scanner struct { + Pos ast.Position + reader *bufio.Reader +} + +func NewScanner(reader io.Reader, source string) *Scanner { + return &Scanner{ + Pos: ast.Position{source, 1, 0}, + reader: bufio.NewReaderSize(reader, 4096), + } +} + +func (sc *Scanner) Error(tok string, msg string) *Error { return &Error{sc.Pos, msg, tok} } + +func (sc *Scanner) TokenError(tok ast.Token, msg string) *Error { return &Error{tok.Pos, msg, tok.Str} } + +func (sc *Scanner) readNext() int { + ch, err := sc.reader.ReadByte() + if err == io.EOF { + return EOF + } + return int(ch) +} + +func (sc *Scanner) Newline(ch int) { + if ch < 0 { + return + } + sc.Pos.Line += 1 + sc.Pos.Column = 0 + next := sc.Peek() + if ch == '\n' && next == '\r' || ch == '\r' && next == '\n' { + sc.reader.ReadByte() + } +} + +func (sc *Scanner) Next() int { + ch := sc.readNext() + switch ch { + case '\n', '\r': + sc.Newline(ch) + ch = int('\n') + case EOF: + sc.Pos.Line = EOF + sc.Pos.Column = 0 + default: + 
sc.Pos.Column++ + } + return ch +} + +func (sc *Scanner) Peek() int { + ch := sc.readNext() + if ch != EOF { + sc.reader.UnreadByte() + } + return ch +} + +func (sc *Scanner) skipWhiteSpace(whitespace int64) int { + ch := sc.Next() + for ; whitespace&(1<': + if sc.Peek() == '=' { + tok.Type = TGte + tok.Str = ">=" + sc.Next() + } else { + tok.Type = ch + tok.Str = string(ch) + } + case '.': + ch2 := sc.Peek() + switch { + case isDecimal(ch2): + tok.Type = TNumber + err = sc.scanNumber(ch, buf) + tok.Str = buf.String() + case ch2 == '.': + writeChar(buf, ch) + writeChar(buf, sc.Next()) + if sc.Peek() == '.' { + writeChar(buf, sc.Next()) + tok.Type = T3Comma + } else { + tok.Type = T2Comma + } + default: + tok.Type = '.' + } + tok.Str = buf.String() + case '+', '*', '/', '%', '^', '#', '(', ')', '{', '}', ']', ';', ':', ',': + tok.Type = ch + tok.Str = string(ch) + default: + writeChar(buf, ch) + err = sc.Error(buf.String(), "Invalid token") + goto finally + } + } + +finally: + tok.Name = TokenName(int(tok.Type)) + return tok, err +} + +// yacc interface {{{ + +type Lexer struct { + scanner *Scanner + Stmts []ast.Stmt + PNewLine bool + Token ast.Token +} + +func (lx *Lexer) Lex(lval *yySymType) int { + tok, err := lx.scanner.Scan(lx) + if err != nil { + panic(err) + } + if tok.Type < 0 { + return 0 + } + lval.token = tok + lx.Token = tok + return int(tok.Type) +} + +func (lx *Lexer) Error(message string) { + panic(lx.scanner.Error(lx.Token.Str, message)) +} + +func (lx *Lexer) TokenError(tok ast.Token, message string) { + panic(lx.scanner.TokenError(tok, message)) +} + +func Parse(reader io.Reader, name string) (chunk []ast.Stmt, err error) { + lexer := &Lexer{NewScanner(reader, name), nil, false, ast.Token{Str: ""}} + chunk = nil + defer func() { + if e := recover(); e != nil { + err, _ = e.(error) + } + }() + yyParse(lexer) + chunk = lexer.Stmts + return +} + +// }}} + +// Dump {{{ + +func isInlineDumpNode(rv reflect.Value) bool { + switch rv.Kind() { + case 
reflect.Struct, reflect.Slice, reflect.Interface, reflect.Ptr: + return false + default: + return true + } +} + +func dump(node interface{}, level int, s string) string { + rt := reflect.TypeOf(node) + if fmt.Sprint(rt) == "" { + return strings.Repeat(s, level) + "" + } + + rv := reflect.ValueOf(node) + buf := []string{} + switch rt.Kind() { + case reflect.Slice: + if rv.Len() == 0 { + return strings.Repeat(s, level) + "" + } + for i := 0; i < rv.Len(); i++ { + buf = append(buf, dump(rv.Index(i).Interface(), level, s)) + } + case reflect.Ptr: + vt := rv.Elem() + tt := rt.Elem() + indicies := []int{} + for i := 0; i < tt.NumField(); i++ { + if strings.Index(tt.Field(i).Name, "Base") > -1 { + continue + } + indicies = append(indicies, i) + } + switch { + case len(indicies) == 0: + return strings.Repeat(s, level) + "" + case len(indicies) == 1 && isInlineDumpNode(vt.Field(indicies[0])): + for _, i := range indicies { + buf = append(buf, strings.Repeat(s, level)+"- Node$"+tt.Name()+": "+dump(vt.Field(i).Interface(), 0, s)) + } + default: + buf = append(buf, strings.Repeat(s, level)+"- Node$"+tt.Name()) + for _, i := range indicies { + if isInlineDumpNode(vt.Field(i)) { + inf := dump(vt.Field(i).Interface(), 0, s) + buf = append(buf, strings.Repeat(s, level+1)+tt.Field(i).Name+": "+inf) + } else { + buf = append(buf, strings.Repeat(s, level+1)+tt.Field(i).Name+": ") + buf = append(buf, dump(vt.Field(i).Interface(), level+2, s)) + } + } + } + default: + buf = append(buf, strings.Repeat(s, level)+fmt.Sprint(node)) + } + return strings.Join(buf, "\n") +} + +func Dump(chunk []ast.Stmt) string { + return dump(chunk, 0, " ") +} + +// }} diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go b/vendor/github.com/yuin/gopher-lua/parse/parser.go new file mode 100644 index 00000000000..f8f59b36154 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go @@ -0,0 +1,1137 @@ +//line parser.go.y:2 +package parse + +import __yyfmt__ "fmt" + +//line parser.go.y:2 
+import ( + "github.com/yuin/gopher-lua/ast" +) + +//line parser.go.y:34 +type yySymType struct { + yys int + token ast.Token + + stmts []ast.Stmt + stmt ast.Stmt + + funcname *ast.FuncName + funcexpr *ast.FunctionExpr + + exprlist []ast.Expr + expr ast.Expr + + fieldlist []*ast.Field + field *ast.Field + fieldsep string + + namelist []string + parlist *ast.ParList +} + +const TAnd = 57346 +const TBreak = 57347 +const TDo = 57348 +const TElse = 57349 +const TElseIf = 57350 +const TEnd = 57351 +const TFalse = 57352 +const TFor = 57353 +const TFunction = 57354 +const TIf = 57355 +const TIn = 57356 +const TLocal = 57357 +const TNil = 57358 +const TNot = 57359 +const TOr = 57360 +const TReturn = 57361 +const TRepeat = 57362 +const TThen = 57363 +const TTrue = 57364 +const TUntil = 57365 +const TWhile = 57366 +const TEqeq = 57367 +const TNeq = 57368 +const TLte = 57369 +const TGte = 57370 +const T2Comma = 57371 +const T3Comma = 57372 +const TIdent = 57373 +const TNumber = 57374 +const TString = 57375 +const UNARY = 57376 + +var yyToknames = []string{ + "TAnd", + "TBreak", + "TDo", + "TElse", + "TElseIf", + "TEnd", + "TFalse", + "TFor", + "TFunction", + "TIf", + "TIn", + "TLocal", + "TNil", + "TNot", + "TOr", + "TReturn", + "TRepeat", + "TThen", + "TTrue", + "TUntil", + "TWhile", + "TEqeq", + "TNeq", + "TLte", + "TGte", + "T2Comma", + "T3Comma", + "TIdent", + "TNumber", + "TString", + " {", + " (", + " >", + " <", + " +", + " -", + " *", + " /", + " %", + "UNARY", + " ^", +} +var yyStatenames = []string{} + +const yyEofCode = 1 +const yyErrCode = 2 +const yyMaxDepth = 200 + +//line parser.go.y:514 +func TokenName(c int) string { + if c >= TAnd && c-TAnd < len(yyToknames) { + if yyToknames[c-TAnd] != "" { + return yyToknames[c-TAnd] + } + } + return string([]byte{byte(c)}) +} + +//line yacctab:1 +var yyExca = []int{ + -1, 1, + 1, -1, + -2, 0, + -1, 17, + 46, 31, + 47, 31, + -2, 68, + -1, 93, + 46, 32, + 47, 32, + -2, 68, +} + +const yyNprod = 95 +const yyPrivate = 57344 + 
+var yyTokenNames []string +var yyStates []string + +const yyLast = 579 + +var yyAct = []int{ + + 24, 88, 50, 23, 45, 84, 56, 65, 137, 153, + 136, 113, 52, 142, 54, 53, 33, 134, 65, 132, + 62, 63, 32, 61, 108, 109, 48, 111, 106, 41, + 42, 105, 49, 155, 166, 81, 82, 83, 138, 104, + 22, 91, 131, 80, 95, 92, 162, 74, 48, 85, + 150, 99, 165, 148, 49, 149, 75, 76, 77, 78, + 79, 67, 80, 107, 106, 148, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 72, 73, 71, 70, 74, 65, 39, 40, + 47, 139, 133, 68, 69, 75, 76, 77, 78, 79, + 60, 80, 141, 144, 143, 146, 145, 31, 67, 147, + 9, 48, 110, 97, 48, 152, 151, 49, 38, 62, + 49, 17, 66, 77, 78, 79, 96, 80, 59, 72, + 73, 71, 70, 74, 154, 102, 91, 156, 55, 157, + 68, 69, 75, 76, 77, 78, 79, 21, 80, 187, + 94, 20, 26, 184, 37, 179, 163, 112, 25, 35, + 178, 93, 170, 172, 27, 171, 164, 173, 19, 159, + 175, 174, 29, 89, 28, 39, 40, 20, 182, 181, + 100, 34, 135, 183, 67, 39, 40, 47, 186, 64, + 51, 1, 90, 87, 36, 130, 86, 30, 66, 18, + 46, 44, 43, 8, 58, 72, 73, 71, 70, 74, + 57, 67, 168, 169, 167, 3, 68, 69, 75, 76, + 77, 78, 79, 160, 80, 66, 4, 2, 0, 0, + 0, 158, 72, 73, 71, 70, 74, 0, 0, 0, + 0, 0, 0, 68, 69, 75, 76, 77, 78, 79, + 26, 80, 37, 0, 0, 0, 25, 35, 140, 0, + 0, 0, 27, 0, 0, 0, 0, 0, 0, 0, + 29, 21, 28, 39, 40, 20, 26, 0, 37, 34, + 0, 0, 25, 35, 0, 0, 0, 0, 27, 0, + 0, 0, 36, 98, 0, 0, 29, 89, 28, 39, + 40, 20, 26, 0, 37, 34, 0, 0, 25, 35, + 0, 0, 0, 0, 27, 67, 90, 176, 36, 0, + 0, 0, 29, 21, 28, 39, 40, 20, 0, 66, + 0, 34, 0, 0, 0, 0, 72, 73, 71, 70, + 74, 0, 67, 0, 36, 0, 0, 68, 69, 75, + 76, 77, 78, 79, 0, 80, 66, 0, 177, 0, + 0, 0, 0, 72, 73, 71, 70, 74, 0, 67, + 0, 185, 0, 0, 68, 69, 75, 76, 77, 78, + 79, 0, 80, 66, 0, 161, 0, 0, 0, 0, + 72, 73, 71, 70, 74, 0, 67, 0, 0, 0, + 0, 68, 69, 75, 76, 77, 78, 79, 0, 80, + 66, 0, 0, 180, 0, 0, 0, 72, 73, 71, + 70, 74, 0, 67, 0, 0, 0, 0, 68, 69, + 75, 76, 77, 78, 79, 0, 80, 66, 0, 0, + 103, 0, 0, 0, 72, 73, 71, 70, 74, 0, + 67, 0, 101, 0, 0, 68, 
69, 75, 76, 77, + 78, 79, 0, 80, 66, 0, 0, 0, 0, 0, + 0, 72, 73, 71, 70, 74, 0, 67, 0, 0, + 0, 0, 68, 69, 75, 76, 77, 78, 79, 0, + 80, 66, 0, 0, 0, 0, 0, 0, 72, 73, + 71, 70, 74, 0, 0, 0, 0, 0, 0, 68, + 69, 75, 76, 77, 78, 79, 0, 80, 72, 73, + 71, 70, 74, 0, 0, 0, 0, 0, 0, 68, + 69, 75, 76, 77, 78, 79, 0, 80, 7, 10, + 0, 0, 0, 0, 14, 15, 13, 0, 16, 0, + 0, 0, 6, 12, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 21, 0, 0, 0, 20, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 5, +} +var yyPact = []int{ + + -1000, -1000, 533, -5, -1000, -1000, 292, -1000, -17, 152, + -1000, 292, -1000, 292, 107, 97, 88, -1000, -1000, -1000, + 292, -1000, -1000, -29, 473, -1000, -1000, -1000, -1000, -1000, + -1000, 152, -1000, -1000, 292, 292, 292, 14, -1000, -1000, + 142, 292, 116, 292, 95, -1000, 82, 240, -1000, -1000, + 171, -1000, 446, 112, 419, -7, 17, 14, -24, -1000, + 81, -19, -1000, 104, -42, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, -1, -1, -1, -1000, -11, -1000, -37, -1000, -8, + 292, 473, -29, -1000, 152, 207, -1000, 55, -1000, -40, + -1000, -1000, 292, -1000, 292, 292, 34, -1000, 24, 19, + 14, 292, -1000, -1000, 473, 57, 493, 18, 18, 18, + 18, 18, 18, 18, 83, 83, -1, -1, -1, -1, + -44, -1000, -1000, -14, -1000, 266, -1000, -1000, 292, 180, + -1000, -1000, -1000, 160, 473, -1000, 338, 40, -1000, -1000, + -1000, -1000, -29, -1000, 157, 22, -1000, 473, -12, -1000, + 205, 292, -1000, 154, -1000, -1000, 292, -1000, -1000, 292, + 311, 151, -1000, 473, 146, 392, -1000, 292, -1000, -1000, + -1000, 144, 365, -1000, -1000, -1000, 140, -1000, +} +var yyPgo = []int{ + + 0, 190, 227, 2, 226, 223, 215, 210, 204, 203, + 118, 6, 3, 0, 22, 107, 168, 199, 4, 197, + 5, 195, 16, 193, 1, 182, +} +var yyR1 = []int{ + + 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, + 13, 13, 13, 13, 13, 13, 13, 14, 15, 15, + 15, 15, 
17, 16, 16, 18, 18, 18, 18, 19, + 20, 20, 21, 21, 21, 22, 22, 23, 23, 23, + 24, 24, 24, 25, 25, +} +var yyR2 = []int{ + + 0, 1, 2, 3, 0, 2, 2, 1, 3, 1, + 3, 5, 4, 6, 8, 9, 11, 7, 3, 4, + 4, 2, 0, 5, 1, 2, 1, 1, 3, 1, + 3, 1, 3, 1, 4, 3, 1, 3, 1, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, + 1, 3, 3, 2, 4, 2, 3, 1, 1, 2, + 5, 4, 1, 1, 3, 2, 3, 1, 3, 2, + 3, 5, 1, 1, 1, +} +var yyChk = []int{ + + -1000, -1, -2, -6, -4, 45, 19, 5, -9, -15, + 6, 24, 20, 13, 11, 12, 15, -10, -17, -16, + 35, 31, 45, -12, -13, 16, 10, 22, 32, 30, + -19, -15, -14, -22, 39, 17, 52, 12, -10, 33, + 34, 46, 47, 50, 49, -18, 48, 35, -22, -14, + -3, -1, -13, -3, -13, 31, -11, -7, -8, 31, + 12, -11, 31, -13, -16, 47, 18, 4, 36, 37, + 28, 27, 25, 26, 29, 38, 39, 40, 41, 42, + 44, -13, -13, -13, -20, 35, 54, -23, -24, 31, + 50, -13, -12, -10, -15, -13, 31, 31, 53, -12, + 9, 6, 23, 21, 46, 14, 47, -20, 48, 49, + 31, 46, 53, 53, -13, -13, -13, -13, -13, -13, + -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, + -21, 53, 30, -11, 54, -25, 47, 45, 46, -13, + 51, -18, 53, -3, -13, -3, -13, -12, 31, 31, + 31, -20, -12, 53, -3, 47, -24, -13, 51, 9, + -5, 47, 6, -3, 9, 30, 46, 9, 7, 8, + -13, -3, 9, -13, -3, -13, 6, 47, 9, 9, + 21, -3, -13, -3, 9, 6, -3, 9, +} +var yyDef = []int{ + + 4, -2, 1, 2, 5, 6, 24, 26, 0, 9, + 4, 0, 4, 0, 0, 0, 0, -2, 69, 70, + 0, 33, 3, 25, 38, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 0, 0, 0, 0, 68, 67, + 0, 0, 0, 0, 0, 73, 0, 0, 77, 78, + 0, 7, 0, 0, 0, 36, 0, 0, 27, 29, + 0, 21, 36, 0, 70, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 64, 65, 66, 79, 0, 85, 0, 87, 33, + 0, 92, 8, -2, 0, 0, 35, 0, 75, 0, + 10, 4, 0, 4, 0, 0, 0, 18, 0, 0, + 0, 0, 71, 72, 39, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 0, 4, 82, 83, 86, 89, 93, 94, 0, 0, + 34, 74, 76, 0, 12, 22, 0, 0, 37, 28, + 30, 19, 20, 4, 0, 0, 88, 90, 0, 11, + 0, 0, 4, 0, 81, 84, 0, 13, 4, 0, + 0, 0, 80, 91, 0, 0, 4, 0, 17, 14, + 4, 0, 0, 23, 15, 4, 
0, 16, +} +var yyTok1 = []int{ + + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 52, 3, 42, 3, 3, + 35, 53, 40, 38, 47, 39, 49, 41, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 48, 45, + 37, 46, 36, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 50, 3, 51, 44, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 34, 3, 54, +} +var yyTok2 = []int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 43, +} +var yyTok3 = []int{ + 0, +} + +//line yaccpar:1 + +/* parser for yacc output */ + +var yyDebug = 0 + +type yyLexer interface { + Lex(lval *yySymType) int + Error(s string) +} + +const yyFlag = -1000 + +func yyTokname(c int) string { + // 4 is TOKSTART above + if c >= 4 && c-4 < len(yyToknames) { + if yyToknames[c-4] != "" { + return yyToknames[c-4] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func yyStatname(s int) string { + if s >= 0 && s < len(yyStatenames) { + if yyStatenames[s] != "" { + return yyStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func yylex1(lex yyLexer, lval *yySymType) int { + c := 0 + char := lex.Lex(lval) + if char <= 0 { + c = yyTok1[0] + goto out + } + if char < len(yyTok1) { + c = yyTok1[char] + goto out + } + if char >= yyPrivate { + if char < yyPrivate+len(yyTok2) { + c = yyTok2[char-yyPrivate] + goto out + } + } + for i := 0; i < len(yyTok3); i += 2 { + c = yyTok3[i+0] + if c == char { + c = yyTok3[i+1] + goto out + } + } + +out: + if c == 0 { + c = yyTok2[1] /* unknown char */ + } + if yyDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char)) + } + return c +} + +func yyParse(yylex yyLexer) int { + var yyn int + var yylval yySymType + var yyVAL yySymType + yyS := make([]yySymType, yyMaxDepth) + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yystate 
:= 0 + yychar := -1 + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + if yyDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate)) + } + + yyp++ + if yyp >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyS[yyp] = yyVAL + yyS[yyp].yys = yystate + +yynewstate: + yyn = yyPact[yystate] + if yyn <= yyFlag { + goto yydefault /* simple state */ + } + if yychar < 0 { + yychar = yylex1(yylex, &yylval) + } + yyn += yychar + if yyn < 0 || yyn >= yyLast { + goto yydefault + } + yyn = yyAct[yyn] + if yyChk[yyn] == yychar { /* valid shift */ + yychar = -1 + yyVAL = yylval + yystate = yyn + if Errflag > 0 { + Errflag-- + } + goto yystack + } + +yydefault: + /* default state action */ + yyn = yyDef[yystate] + if yyn == -2 { + if yychar < 0 { + yychar = yylex1(yylex, &yylval) + } + + /* look through exception table */ + xi := 0 + for { + if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + yyn = yyExca[xi+0] + if yyn < 0 || yyn == yychar { + break + } + } + yyn = yyExca[xi+1] + if yyn < 0 { + goto ret0 + } + } + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + yylex.Error("syntax error") + Nerrs++ + if yyDebug >= 1 { + __yyfmt__.Printf("%s", yyStatname(yystate)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yychar)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... 
try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + yyn = yyPact[yyS[yyp].yys] + yyErrCode + if yyn >= 0 && yyn < yyLast { + yystate = yyAct[yyn] /* simulate a shift of "error" */ + if yyChk[yystate] == yyErrCode { + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar)) + } + if yychar == yyEofCode { + goto ret1 + } + yychar = -1 + goto yynewstate /* try again in the same state */ + } + } + + /* reduction by production yyn */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate)) + } + + yynt := yyn + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= yyR2[yyn] + yyVAL = yyS[yyp+1] + + /* consult goto table to find next state */ + yyn = yyR1[yyn] + yyg := yyPgo[yyn] + yyj := yyg + yyS[yyp].yys + 1 + + if yyj >= yyLast { + yystate = yyAct[yyg] + } else { + yystate = yyAct[yyj] + if yyChk[yystate] != -yyn { + yystate = yyAct[yyg] + } + } + // dummy call; replaced with literal code + switch yynt { + + case 1: + //line parser.go.y:73 + { + yyVAL.stmts = yyS[yypt-0].stmts + if l, ok := yylex.(*Lexer); ok { + l.Stmts = yyVAL.stmts + } + } + case 2: + //line parser.go.y:79 + { + yyVAL.stmts = append(yyS[yypt-1].stmts, yyS[yypt-0].stmt) + if l, ok := yylex.(*Lexer); ok { + l.Stmts = yyVAL.stmts + } + } + case 3: + //line parser.go.y:85 + { + yyVAL.stmts = append(yyS[yypt-2].stmts, yyS[yypt-1].stmt) + if l, ok := yylex.(*Lexer); ok { + l.Stmts = yyVAL.stmts + } + } + case 4: + //line parser.go.y:93 + { + yyVAL.stmts = []ast.Stmt{} + } + case 5: + //line parser.go.y:96 + { + yyVAL.stmts = append(yyS[yypt-1].stmts, 
yyS[yypt-0].stmt) + } + case 6: + //line parser.go.y:99 + { + yyVAL.stmts = yyS[yypt-1].stmts + } + case 7: + //line parser.go.y:104 + { + yyVAL.stmts = yyS[yypt-0].stmts + } + case 8: + //line parser.go.y:109 + { + yyVAL.stmt = &ast.AssignStmt{Lhs: yyS[yypt-2].exprlist, Rhs: yyS[yypt-0].exprlist} + yyVAL.stmt.SetLine(yyS[yypt-2].exprlist[0].Line()) + } + case 9: + //line parser.go.y:114 + { + if _, ok := yyS[yypt-0].expr.(*ast.FuncCallExpr); !ok { + yylex.(*Lexer).Error("parse error") + } else { + yyVAL.stmt = &ast.FuncCallStmt{Expr: yyS[yypt-0].expr} + yyVAL.stmt.SetLine(yyS[yypt-0].expr.Line()) + } + } + case 10: + //line parser.go.y:122 + { + yyVAL.stmt = &ast.DoBlockStmt{Stmts: yyS[yypt-1].stmts} + yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 11: + //line parser.go.y:127 + { + yyVAL.stmt = &ast.WhileStmt{Condition: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} + yyVAL.stmt.SetLine(yyS[yypt-4].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 12: + //line parser.go.y:132 + { + yyVAL.stmt = &ast.RepeatStmt{Condition: yyS[yypt-0].expr, Stmts: yyS[yypt-2].stmts} + yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].expr.Line()) + } + case 13: + //line parser.go.y:137 + { + yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-4].expr, Then: yyS[yypt-2].stmts} + cur := yyVAL.stmt + for _, elseif := range yyS[yypt-1].stmts { + cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} + cur = elseif + } + yyVAL.stmt.SetLine(yyS[yypt-5].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 14: + //line parser.go.y:147 + { + yyVAL.stmt = &ast.IfStmt{Condition: yyS[yypt-6].expr, Then: yyS[yypt-4].stmts} + cur := yyVAL.stmt + for _, elseif := range yyS[yypt-3].stmts { + cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} + cur = elseif + } + cur.(*ast.IfStmt).Else = yyS[yypt-1].stmts + yyVAL.stmt.SetLine(yyS[yypt-7].token.Pos.Line) + 
yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 15: + //line parser.go.y:158 + { + yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-7].token.Str, Init: yyS[yypt-5].expr, Limit: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} + yyVAL.stmt.SetLine(yyS[yypt-8].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 16: + //line parser.go.y:163 + { + yyVAL.stmt = &ast.NumberForStmt{Name: yyS[yypt-9].token.Str, Init: yyS[yypt-7].expr, Limit: yyS[yypt-5].expr, Step: yyS[yypt-3].expr, Stmts: yyS[yypt-1].stmts} + yyVAL.stmt.SetLine(yyS[yypt-10].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 17: + //line parser.go.y:168 + { + yyVAL.stmt = &ast.GenericForStmt{Names: yyS[yypt-5].namelist, Exprs: yyS[yypt-3].exprlist, Stmts: yyS[yypt-1].stmts} + yyVAL.stmt.SetLine(yyS[yypt-6].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 18: + //line parser.go.y:173 + { + yyVAL.stmt = &ast.FuncDefStmt{Name: yyS[yypt-1].funcname, Func: yyS[yypt-0].funcexpr} + yyVAL.stmt.SetLine(yyS[yypt-2].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + } + case 19: + //line parser.go.y:178 + { + yyVAL.stmt = &ast.LocalAssignStmt{Names: []string{yyS[yypt-1].token.Str}, Exprs: []ast.Expr{yyS[yypt-0].funcexpr}} + yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) + yyVAL.stmt.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + } + case 20: + //line parser.go.y:183 + { + yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-2].namelist, Exprs: yyS[yypt-0].exprlist} + yyVAL.stmt.SetLine(yyS[yypt-3].token.Pos.Line) + } + case 21: + //line parser.go.y:187 + { + yyVAL.stmt = &ast.LocalAssignStmt{Names: yyS[yypt-0].namelist, Exprs: []ast.Expr{}} + yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line) + } + case 22: + //line parser.go.y:193 + { + yyVAL.stmts = []ast.Stmt{} + } + case 23: + //line parser.go.y:196 + { + yyVAL.stmts = append(yyS[yypt-4].stmts, &ast.IfStmt{Condition: yyS[yypt-2].expr, Then: 
yyS[yypt-0].stmts}) + yyVAL.stmts[len(yyVAL.stmts)-1].SetLine(yyS[yypt-3].token.Pos.Line) + } + case 24: + //line parser.go.y:202 + { + yyVAL.stmt = &ast.ReturnStmt{Exprs: nil} + yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 25: + //line parser.go.y:206 + { + yyVAL.stmt = &ast.ReturnStmt{Exprs: yyS[yypt-0].exprlist} + yyVAL.stmt.SetLine(yyS[yypt-1].token.Pos.Line) + } + case 26: + //line parser.go.y:210 + { + yyVAL.stmt = &ast.BreakStmt{} + yyVAL.stmt.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 27: + //line parser.go.y:216 + { + yyVAL.funcname = yyS[yypt-0].funcname + } + case 28: + //line parser.go.y:219 + { + yyVAL.funcname = &ast.FuncName{Func: nil, Receiver: yyS[yypt-2].funcname.Func, Method: yyS[yypt-0].token.Str} + } + case 29: + //line parser.go.y:224 + { + yyVAL.funcname = &ast.FuncName{Func: &ast.IdentExpr{Value: yyS[yypt-0].token.Str}} + yyVAL.funcname.Func.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 30: + //line parser.go.y:228 + { + key := &ast.StringExpr{Value: yyS[yypt-0].token.Str} + key.SetLine(yyS[yypt-0].token.Pos.Line) + fn := &ast.AttrGetExpr{Object: yyS[yypt-2].funcname.Func, Key: key} + fn.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.funcname = &ast.FuncName{Func: fn} + } + case 31: + //line parser.go.y:237 + { + yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + } + case 32: + //line parser.go.y:240 + { + yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr) + } + case 33: + //line parser.go.y:245 + { + yyVAL.expr = &ast.IdentExpr{Value: yyS[yypt-0].token.Str} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 34: + //line parser.go.y:249 + { + yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-3].expr, Key: yyS[yypt-1].expr} + yyVAL.expr.SetLine(yyS[yypt-3].expr.Line()) + } + case 35: + //line parser.go.y:253 + { + key := &ast.StringExpr{Value: yyS[yypt-0].token.Str} + key.SetLine(yyS[yypt-0].token.Pos.Line) + yyVAL.expr = &ast.AttrGetExpr{Object: yyS[yypt-2].expr, Key: key} + 
yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 36: + //line parser.go.y:261 + { + yyVAL.namelist = []string{yyS[yypt-0].token.Str} + } + case 37: + //line parser.go.y:264 + { + yyVAL.namelist = append(yyS[yypt-2].namelist, yyS[yypt-0].token.Str) + } + case 38: + //line parser.go.y:269 + { + yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + } + case 39: + //line parser.go.y:272 + { + yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr) + } + case 40: + //line parser.go.y:277 + { + yyVAL.expr = &ast.NilExpr{} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 41: + //line parser.go.y:281 + { + yyVAL.expr = &ast.FalseExpr{} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 42: + //line parser.go.y:285 + { + yyVAL.expr = &ast.TrueExpr{} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 43: + //line parser.go.y:289 + { + yyVAL.expr = &ast.NumberExpr{Value: yyS[yypt-0].token.Str} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 44: + //line parser.go.y:293 + { + yyVAL.expr = &ast.Comma3Expr{} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 45: + //line parser.go.y:297 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 46: + //line parser.go.y:300 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 47: + //line parser.go.y:303 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 48: + //line parser.go.y:306 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 49: + //line parser.go.y:309 + { + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "or", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 50: + //line parser.go.y:313 + { + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "and", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 51: + //line parser.go.y:317 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 52: + //line 
parser.go.y:321 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 53: + //line parser.go.y:325 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: ">=", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 54: + //line parser.go.y:329 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "<=", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 55: + //line parser.go.y:333 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "==", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 56: + //line parser.go.y:337 + { + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyS[yypt-2].expr, Operator: "~=", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 57: + //line parser.go.y:341 + { + yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyS[yypt-2].expr, Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 58: + //line parser.go.y:345 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "+", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 59: + //line parser.go.y:349 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "-", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 60: + //line parser.go.y:353 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "*", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 61: + //line parser.go.y:357 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "/", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 62: + //line parser.go.y:361 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "%", Rhs: yyS[yypt-0].expr} + 
yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 63: + //line parser.go.y:365 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyS[yypt-2].expr, Operator: "^", Rhs: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-2].expr.Line()) + } + case 64: + //line parser.go.y:369 + { + yyVAL.expr = &ast.UnaryMinusOpExpr{Expr: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + } + case 65: + //line parser.go.y:373 + { + yyVAL.expr = &ast.UnaryNotOpExpr{Expr: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + } + case 66: + //line parser.go.y:377 + { + yyVAL.expr = &ast.UnaryLenOpExpr{Expr: yyS[yypt-0].expr} + yyVAL.expr.SetLine(yyS[yypt-0].expr.Line()) + } + case 67: + //line parser.go.y:383 + { + yyVAL.expr = &ast.StringExpr{Value: yyS[yypt-0].token.Str} + yyVAL.expr.SetLine(yyS[yypt-0].token.Pos.Line) + } + case 68: + //line parser.go.y:389 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 69: + //line parser.go.y:392 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 70: + //line parser.go.y:395 + { + yyVAL.expr = yyS[yypt-0].expr + } + case 71: + //line parser.go.y:398 + { + yyVAL.expr = yyS[yypt-1].expr + yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line) + } + case 72: + //line parser.go.y:404 + { + yyS[yypt-1].expr.(*ast.FuncCallExpr).AdjustRet = true + yyVAL.expr = yyS[yypt-1].expr + } + case 73: + //line parser.go.y:410 + { + yyVAL.expr = &ast.FuncCallExpr{Func: yyS[yypt-1].expr, Args: yyS[yypt-0].exprlist} + yyVAL.expr.SetLine(yyS[yypt-1].expr.Line()) + } + case 74: + //line parser.go.y:414 + { + yyVAL.expr = &ast.FuncCallExpr{Method: yyS[yypt-1].token.Str, Receiver: yyS[yypt-3].expr, Args: yyS[yypt-0].exprlist} + yyVAL.expr.SetLine(yyS[yypt-3].expr.Line()) + } + case 75: + //line parser.go.y:420 + { + if yylex.(*Lexer).PNewLine { + yylex.(*Lexer).TokenError(yyS[yypt-1].token, "ambiguous syntax (function call x new statement)") + } + yyVAL.exprlist = []ast.Expr{} + } + case 76: + //line parser.go.y:426 + { + if yylex.(*Lexer).PNewLine { + 
yylex.(*Lexer).TokenError(yyS[yypt-2].token, "ambiguous syntax (function call x new statement)") + } + yyVAL.exprlist = yyS[yypt-1].exprlist + } + case 77: + //line parser.go.y:432 + { + yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + } + case 78: + //line parser.go.y:435 + { + yyVAL.exprlist = []ast.Expr{yyS[yypt-0].expr} + } + case 79: + //line parser.go.y:440 + { + yyVAL.expr = &ast.FunctionExpr{ParList: yyS[yypt-0].funcexpr.ParList, Stmts: yyS[yypt-0].funcexpr.Stmts} + yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line) + yyVAL.expr.SetLastLine(yyS[yypt-0].funcexpr.LastLine()) + } + case 80: + //line parser.go.y:447 + { + yyVAL.funcexpr = &ast.FunctionExpr{ParList: yyS[yypt-3].parlist, Stmts: yyS[yypt-1].stmts} + yyVAL.funcexpr.SetLine(yyS[yypt-4].token.Pos.Line) + yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 81: + //line parser.go.y:452 + { + yyVAL.funcexpr = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: yyS[yypt-1].stmts} + yyVAL.funcexpr.SetLine(yyS[yypt-3].token.Pos.Line) + yyVAL.funcexpr.SetLastLine(yyS[yypt-0].token.Pos.Line) + } + case 82: + //line parser.go.y:459 + { + yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} + } + case 83: + //line parser.go.y:462 + { + yyVAL.parlist = &ast.ParList{HasVargs: false, Names: []string{}} + yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-0].namelist...) + } + case 84: + //line parser.go.y:466 + { + yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} + yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyS[yypt-2].namelist...) 
+ } + case 85: + //line parser.go.y:473 + { + yyVAL.expr = &ast.TableExpr{Fields: []*ast.Field{}} + yyVAL.expr.SetLine(yyS[yypt-1].token.Pos.Line) + } + case 86: + //line parser.go.y:477 + { + yyVAL.expr = &ast.TableExpr{Fields: yyS[yypt-1].fieldlist} + yyVAL.expr.SetLine(yyS[yypt-2].token.Pos.Line) + } + case 87: + //line parser.go.y:484 + { + yyVAL.fieldlist = []*ast.Field{yyS[yypt-0].field} + } + case 88: + //line parser.go.y:487 + { + yyVAL.fieldlist = append(yyS[yypt-2].fieldlist, yyS[yypt-0].field) + } + case 89: + //line parser.go.y:490 + { + yyVAL.fieldlist = yyS[yypt-1].fieldlist + } + case 90: + //line parser.go.y:495 + { + yyVAL.field = &ast.Field{Key: &ast.StringExpr{Value: yyS[yypt-2].token.Str}, Value: yyS[yypt-0].expr} + yyVAL.field.Key.SetLine(yyS[yypt-2].token.Pos.Line) + } + case 91: + //line parser.go.y:499 + { + yyVAL.field = &ast.Field{Key: yyS[yypt-3].expr, Value: yyS[yypt-0].expr} + } + case 92: + //line parser.go.y:502 + { + yyVAL.field = &ast.Field{Value: yyS[yypt-0].expr} + } + case 93: + //line parser.go.y:507 + { + yyVAL.fieldsep = "," + } + case 94: + //line parser.go.y:510 + { + yyVAL.fieldsep = ";" + } + } + goto yystack /* stack new state and value */ +} diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y new file mode 100644 index 00000000000..956133db292 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y @@ -0,0 +1,524 @@ +%{ +package parse + +import ( + "github.com/yuin/gopher-lua/ast" +) +%} +%type chunk +%type chunk1 +%type block +%type stat +%type elseifs +%type laststat +%type funcname +%type funcname1 +%type varlist +%type var +%type namelist +%type exprlist +%type expr +%type string +%type prefixexp +%type functioncall +%type afunctioncall +%type args +%type function +%type funcbody +%type parlist +%type tableconstructor +%type fieldlist +%type field +%type fieldsep + +%union { + token ast.Token + + stmts []ast.Stmt + stmt ast.Stmt + + 
funcname *ast.FuncName + funcexpr *ast.FunctionExpr + + exprlist []ast.Expr + expr ast.Expr + + fieldlist []*ast.Field + field *ast.Field + fieldsep string + + namelist []string + parlist *ast.ParList +} + +/* Reserved words */ +%token TAnd TBreak TDo TElse TElseIf TEnd TFalse TFor TFunction TIf TIn TLocal TNil TNot TOr TReturn TRepeat TThen TTrue TUntil TWhile + +/* Literals */ +%token TEqeq TNeq TLte TGte T2Comma T3Comma TIdent TNumber TString '{' '(' + +/* Operators */ +%left TOr +%left TAnd +%left '>' '<' TGte TLte TEqeq TNeq +%right T2Comma +%left '+' '-' +%left '*' '/' '%' +%right UNARY /* not # -(unary) */ +%right '^' + +%% + +chunk: + chunk1 { + $$ = $1 + if l, ok := yylex.(*Lexer); ok { + l.Stmts = $$ + } + } | + chunk1 laststat { + $$ = append($1, $2) + if l, ok := yylex.(*Lexer); ok { + l.Stmts = $$ + } + } | + chunk1 laststat ';' { + $$ = append($1, $2) + if l, ok := yylex.(*Lexer); ok { + l.Stmts = $$ + } + } + +chunk1: + { + $$ = []ast.Stmt{} + } | + chunk1 stat { + $$ = append($1, $2) + } | + chunk1 ';' { + $$ = $1 + } + +block: + chunk { + $$ = $1 + } + +stat: + varlist '=' exprlist { + $$ = &ast.AssignStmt{Lhs: $1, Rhs: $3} + $$.SetLine($1[0].Line()) + } | + /* 'stat = functioncal' causes a reduce/reduce conflict */ + prefixexp { + if _, ok := $1.(*ast.FuncCallExpr); !ok { + yylex.(*Lexer).Error("parse error") + } else { + $$ = &ast.FuncCallStmt{Expr: $1} + $$.SetLine($1.Line()) + } + } | + TDo block TEnd { + $$ = &ast.DoBlockStmt{Stmts: $2} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($3.Pos.Line) + } | + TWhile expr TDo block TEnd { + $$ = &ast.WhileStmt{Condition: $2, Stmts: $4} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($5.Pos.Line) + } | + TRepeat block TUntil expr { + $$ = &ast.RepeatStmt{Condition: $4, Stmts: $2} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($4.Line()) + } | + TIf expr TThen block elseifs TEnd { + $$ = &ast.IfStmt{Condition: $2, Then: $4} + cur := $$ + for _, elseif := range $5 { + cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} + 
cur = elseif + } + $$.SetLine($1.Pos.Line) + $$.SetLastLine($6.Pos.Line) + } | + TIf expr TThen block elseifs TElse block TEnd { + $$ = &ast.IfStmt{Condition: $2, Then: $4} + cur := $$ + for _, elseif := range $5 { + cur.(*ast.IfStmt).Else = []ast.Stmt{elseif} + cur = elseif + } + cur.(*ast.IfStmt).Else = $7 + $$.SetLine($1.Pos.Line) + $$.SetLastLine($8.Pos.Line) + } | + TFor TIdent '=' expr ',' expr TDo block TEnd { + $$ = &ast.NumberForStmt{Name: $2.Str, Init: $4, Limit: $6, Stmts: $8} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($9.Pos.Line) + } | + TFor TIdent '=' expr ',' expr ',' expr TDo block TEnd { + $$ = &ast.NumberForStmt{Name: $2.Str, Init: $4, Limit: $6, Step:$8, Stmts: $10} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($11.Pos.Line) + } | + TFor namelist TIn exprlist TDo block TEnd { + $$ = &ast.GenericForStmt{Names:$2, Exprs:$4, Stmts: $6} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($7.Pos.Line) + } | + TFunction funcname funcbody { + $$ = &ast.FuncDefStmt{Name: $2, Func: $3} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($3.LastLine()) + } | + TLocal TFunction TIdent funcbody { + $$ = &ast.LocalAssignStmt{Names:[]string{$3.Str}, Exprs: []ast.Expr{$4}} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($4.LastLine()) + } | + TLocal namelist '=' exprlist { + $$ = &ast.LocalAssignStmt{Names: $2, Exprs:$4} + $$.SetLine($1.Pos.Line) + } | + TLocal namelist { + $$ = &ast.LocalAssignStmt{Names: $2, Exprs:[]ast.Expr{}} + $$.SetLine($1.Pos.Line) + } + +elseifs: + { + $$ = []ast.Stmt{} + } | + elseifs TElseIf expr TThen block { + $$ = append($1, &ast.IfStmt{Condition: $3, Then: $5}) + $$[len($$)-1].SetLine($2.Pos.Line) + } + +laststat: + TReturn { + $$ = &ast.ReturnStmt{Exprs:nil} + $$.SetLine($1.Pos.Line) + } | + TReturn exprlist { + $$ = &ast.ReturnStmt{Exprs:$2} + $$.SetLine($1.Pos.Line) + } | + TBreak { + $$ = &ast.BreakStmt{} + $$.SetLine($1.Pos.Line) + } + +funcname: + funcname1 { + $$ = $1 + } | + funcname1 ':' TIdent { + $$ = &ast.FuncName{Func:nil, Receiver:$1.Func, 
Method: $3.Str} + } + +funcname1: + TIdent { + $$ = &ast.FuncName{Func: &ast.IdentExpr{Value:$1.Str}} + $$.Func.SetLine($1.Pos.Line) + } | + funcname1 '.' TIdent { + key:= &ast.StringExpr{Value:$3.Str} + key.SetLine($3.Pos.Line) + fn := &ast.AttrGetExpr{Object: $1.Func, Key: key} + fn.SetLine($3.Pos.Line) + $$ = &ast.FuncName{Func: fn} + } + +varlist: + var { + $$ = []ast.Expr{$1} + } | + varlist ',' var { + $$ = append($1, $3) + } + +var: + TIdent { + $$ = &ast.IdentExpr{Value:$1.Str} + $$.SetLine($1.Pos.Line) + } | + prefixexp '[' expr ']' { + $$ = &ast.AttrGetExpr{Object: $1, Key: $3} + $$.SetLine($1.Line()) + } | + prefixexp '.' TIdent { + key := &ast.StringExpr{Value:$3.Str} + key.SetLine($3.Pos.Line) + $$ = &ast.AttrGetExpr{Object: $1, Key: key} + $$.SetLine($1.Line()) + } + +namelist: + TIdent { + $$ = []string{$1.Str} + } | + namelist ',' TIdent { + $$ = append($1, $3.Str) + } + +exprlist: + expr { + $$ = []ast.Expr{$1} + } | + exprlist ',' expr { + $$ = append($1, $3) + } + +expr: + TNil { + $$ = &ast.NilExpr{} + $$.SetLine($1.Pos.Line) + } | + TFalse { + $$ = &ast.FalseExpr{} + $$.SetLine($1.Pos.Line) + } | + TTrue { + $$ = &ast.TrueExpr{} + $$.SetLine($1.Pos.Line) + } | + TNumber { + $$ = &ast.NumberExpr{Value: $1.Str} + $$.SetLine($1.Pos.Line) + } | + T3Comma { + $$ = &ast.Comma3Expr{} + $$.SetLine($1.Pos.Line) + } | + function { + $$ = $1 + } | + prefixexp { + $$ = $1 + } | + string { + $$ = $1 + } | + tableconstructor { + $$ = $1 + } | + expr TOr expr { + $$ = &ast.LogicalOpExpr{Lhs: $1, Operator: "or", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr TAnd expr { + $$ = &ast.LogicalOpExpr{Lhs: $1, Operator: "and", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '>' expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: ">", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '<' expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "<", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr TGte expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: ">=", Rhs: $3} + 
$$.SetLine($1.Line()) + } | + expr TLte expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "<=", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr TEqeq expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "==", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr TNeq expr { + $$ = &ast.RelationalOpExpr{Lhs: $1, Operator: "~=", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr T2Comma expr { + $$ = &ast.StringConcatOpExpr{Lhs: $1, Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '+' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "+", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '-' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "-", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '*' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "*", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '/' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "/", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '%' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "%", Rhs: $3} + $$.SetLine($1.Line()) + } | + expr '^' expr { + $$ = &ast.ArithmeticOpExpr{Lhs: $1, Operator: "^", Rhs: $3} + $$.SetLine($1.Line()) + } | + '-' expr %prec UNARY { + $$ = &ast.UnaryMinusOpExpr{Expr: $2} + $$.SetLine($2.Line()) + } | + TNot expr %prec UNARY { + $$ = &ast.UnaryNotOpExpr{Expr: $2} + $$.SetLine($2.Line()) + } | + '#' expr %prec UNARY { + $$ = &ast.UnaryLenOpExpr{Expr: $2} + $$.SetLine($2.Line()) + } + +string: + TString { + $$ = &ast.StringExpr{Value: $1.Str} + $$.SetLine($1.Pos.Line) + } + +prefixexp: + var { + $$ = $1 + } | + afunctioncall { + $$ = $1 + } | + functioncall { + $$ = $1 + } | + '(' expr ')' { + $$ = $2 + $$.SetLine($1.Pos.Line) + } + +afunctioncall: + '(' functioncall ')' { + $2.(*ast.FuncCallExpr).AdjustRet = true + $$ = $2 + } + +functioncall: + prefixexp args { + $$ = &ast.FuncCallExpr{Func: $1, Args: $2} + $$.SetLine($1.Line()) + } | + prefixexp ':' TIdent args { + $$ = &ast.FuncCallExpr{Method: $3.Str, Receiver: $1, Args: $4} + $$.SetLine($1.Line()) + } + +args: + 
'(' ')' { + if yylex.(*Lexer).PNewLine { + yylex.(*Lexer).TokenError($1, "ambiguous syntax (function call x new statement)") + } + $$ = []ast.Expr{} + } | + '(' exprlist ')' { + if yylex.(*Lexer).PNewLine { + yylex.(*Lexer).TokenError($1, "ambiguous syntax (function call x new statement)") + } + $$ = $2 + } | + tableconstructor { + $$ = []ast.Expr{$1} + } | + string { + $$ = []ast.Expr{$1} + } + +function: + TFunction funcbody { + $$ = &ast.FunctionExpr{ParList:$2.ParList, Stmts: $2.Stmts} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($2.LastLine()) + } + +funcbody: + '(' parlist ')' block TEnd { + $$ = &ast.FunctionExpr{ParList: $2, Stmts: $4} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($5.Pos.Line) + } | + '(' ')' block TEnd { + $$ = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: $3} + $$.SetLine($1.Pos.Line) + $$.SetLastLine($4.Pos.Line) + } + +parlist: + T3Comma { + $$ = &ast.ParList{HasVargs: true, Names: []string{}} + } | + namelist { + $$ = &ast.ParList{HasVargs: false, Names: []string{}} + $$.Names = append($$.Names, $1...) + } | + namelist ',' T3Comma { + $$ = &ast.ParList{HasVargs: true, Names: []string{}} + $$.Names = append($$.Names, $1...) 
+ } + + +tableconstructor: + '{' '}' { + $$ = &ast.TableExpr{Fields: []*ast.Field{}} + $$.SetLine($1.Pos.Line) + } | + '{' fieldlist '}' { + $$ = &ast.TableExpr{Fields: $2} + $$.SetLine($1.Pos.Line) + } + + +fieldlist: + field { + $$ = []*ast.Field{$1} + } | + fieldlist fieldsep field { + $$ = append($1, $3) + } | + fieldlist fieldsep { + $$ = $1 + } + +field: + TIdent '=' expr { + $$ = &ast.Field{Key: &ast.StringExpr{Value:$1.Str}, Value: $3} + $$.Key.SetLine($1.Pos.Line) + } | + '[' expr ']' '=' expr { + $$ = &ast.Field{Key: $2, Value: $5} + } | + expr { + $$ = &ast.Field{Value: $1} + } + +fieldsep: + ',' { + $$ = "," + } | + ';' { + $$ = ";" + } + +%% + +func TokenName(c int) string { + if c >= TAnd && c-TAnd < len(yyToknames) { + if yyToknames[c-TAnd] != "" { + return yyToknames[c-TAnd] + } + } + return string([]byte{byte(c)}) +} + diff --git a/vendor/github.com/yuin/gopher-lua/pm/pm.go b/vendor/github.com/yuin/gopher-lua/pm/pm.go new file mode 100644 index 00000000000..918deb442b6 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/pm/pm.go @@ -0,0 +1,637 @@ +// Lua pattern match functions for Go +package pm + +import ( + "fmt" +) + +const EOS = -1 +const _UNKNOWN = -2 + +/* Error {{{ */ + +type Error struct { + Pos int + Message string +} + +func newError(pos int, message string, args ...interface{}) *Error { + if len(args) == 0 { + return &Error{pos, message} + } + return &Error{pos, fmt.Sprintf(message, args...)} +} + +func (e *Error) Error() string { + switch e.Pos { + case EOS: + return fmt.Sprintf("%s at EOS", e.Message) + case _UNKNOWN: + return fmt.Sprintf("%s", e.Message) + default: + return fmt.Sprintf("%s at %d", e.Message, e.Pos) + } +} + +/* }}} */ + +/* MatchData {{{ */ + +type MatchData struct { + // captured positions + // layout + // xxxx xxxx xxxx xxx0 : caputured positions + // xxxx xxxx xxxx xxx1 : position captured positions + captures []uint32 +} + +func newMatchState() *MatchData { return &MatchData{[]uint32{}} } + +func (st 
*MatchData) addPosCapture(s, pos int) { + for s+1 >= len(st.captures) { + st.captures = append(st.captures, 0) + } + st.captures[s] = (uint32(pos) << 1) | 1 + st.captures[s+1] = (uint32(pos) << 1) | 1 +} + +func (st *MatchData) setCapture(s, pos int) uint32 { + for s >= len(st.captures) { + st.captures = append(st.captures, 0) + } + v := st.captures[s] + st.captures[s] = (uint32(pos) << 1) + return v +} + +func (st *MatchData) restoreCapture(s int, pos uint32) { st.captures[s] = pos } + +func (st *MatchData) CaptureLength() int { return len(st.captures) } + +func (st *MatchData) IsPosCapture(idx int) bool { return (st.captures[idx] & 1) == 1 } + +func (st *MatchData) Capture(idx int) int { return int(st.captures[idx] >> 1) } + +/* }}} */ + +/* scanner {{{ */ + +type scannerState struct { + Pos int + started bool +} + +type scanner struct { + src []byte + State scannerState + saved scannerState +} + +func newScanner(src []byte) *scanner { + return &scanner{ + src: src, + State: scannerState{ + Pos: 0, + started: false, + }, + saved: scannerState{}, + } +} + +func (sc *scanner) Length() int { return len(sc.src) } + +func (sc *scanner) Next() int { + if !sc.State.started { + sc.State.started = true + if len(sc.src) == 0 { + sc.State.Pos = EOS + } + } else { + sc.State.Pos = sc.NextPos() + } + if sc.State.Pos == EOS { + return EOS + } + return int(sc.src[sc.State.Pos]) +} + +func (sc *scanner) CurrentPos() int { + return sc.State.Pos +} + +func (sc *scanner) NextPos() int { + if sc.State.Pos == EOS || sc.State.Pos >= len(sc.src)-1 { + return EOS + } + if !sc.State.started { + return 0 + } else { + return sc.State.Pos + 1 + } +} + +func (sc *scanner) Peek() int { + cureof := sc.State.Pos == EOS + ch := sc.Next() + if !cureof { + if sc.State.Pos == EOS { + sc.State.Pos = len(sc.src) - 1 + } else { + sc.State.Pos-- + if sc.State.Pos < 0 { + sc.State.Pos = 0 + sc.State.started = false + } + } + } + return ch +} + +func (sc *scanner) Save() { sc.saved = sc.State } + +func 
(sc *scanner) Restore() { sc.State = sc.saved } + +/* }}} */ + +/* bytecode {{{ */ + +type opCode int + +const ( + opChar opCode = iota + opMatch + opTailMatch + opJmp + opSplit + opSave + opPSave + opBrace + opNumber +) + +type inst struct { + OpCode opCode + Class class + Operand1 int + Operand2 int +} + +/* }}} */ + +/* classes {{{ */ + +type class interface { + Matches(ch int) bool +} + +type dotClass struct{} + +func (pn *dotClass) Matches(ch int) bool { return true } + +type charClass struct { + Ch int +} + +func (pn *charClass) Matches(ch int) bool { return pn.Ch == ch } + +type singleClass struct { + Class int +} + +func (pn *singleClass) Matches(ch int) bool { + ret := false + switch pn.Class { + case 'a', 'A': + ret = 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z' + case 'c', 'C': + ret = (0x00 <= ch && ch <= 0x1F) || ch == 0x7F + case 'd', 'D': + ret = '0' <= ch && ch <= '9' + case 'l', 'L': + ret = 'a' <= ch && ch <= 'z' + case 'p', 'P': + ret = (0x21 <= ch && ch <= 0x2f) || (0x30 <= ch && ch <= 0x40) || (0x5b <= ch && ch <= 0x60) || (0x7b <= ch && ch <= 0x7e) + case 's', 'S': + switch ch { + case ' ', '\f', '\n', '\r', '\t', '\v': + ret = true + } + case 'u', 'U': + ret = 'A' <= ch && ch <= 'Z' + case 'w', 'W': + ret = '0' <= ch && ch <= '9' || 'A' <= ch && ch <= 'Z' || 'a' <= ch && ch <= 'z' + case 'x', 'X': + ret = '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' + case 'z', 'Z': + ret = ch == 0 + default: + return ch == pn.Class + } + if 'A' <= pn.Class && pn.Class <= 'Z' { + return !ret + } + return ret +} + +type setClass struct { + IsNot bool + Classes []class +} + +func (pn *setClass) Matches(ch int) bool { + for _, class := range pn.Classes { + if class.Matches(ch) { + return !pn.IsNot + } + } + return pn.IsNot +} + +type rangeClass struct { + Begin class + End class +} + +func (pn *rangeClass) Matches(ch int) bool { + switch begin := pn.Begin.(type) { + case *charClass: + end, ok := pn.End.(*charClass) + if !ok { + 
return false + } + return begin.Ch <= ch && ch <= end.Ch + } + return false +} + +// }}} + +// patterns {{{ + +type pattern interface{} + +type singlePattern struct { + Class class +} + +type seqPattern struct { + MustHead bool + MustTail bool + Patterns []pattern +} + +type repeatPattern struct { + Type int + Class class +} + +type posCapPattern struct{} + +type capPattern struct { + Pattern pattern +} + +type numberPattern struct { + N int +} + +type bracePattern struct { + Begin int + End int +} + +// }}} + +/* parse {{{ */ + +func parseClass(sc *scanner, allowset bool) class { + ch := sc.Next() + switch ch { + case '%': + return &singleClass{sc.Next()} + case '.': + if allowset { + return &dotClass{} + } else { + return &charClass{ch} + } + case '[': + if !allowset { + panic(newError(sc.CurrentPos(), "invalid '['")) + } + return parseClassSet(sc) + //case '^' '$', '(', ')', ']', '*', '+', '-', '?': + // panic(newError(sc.CurrentPos(), "invalid %c", ch)) + case EOS: + panic(newError(sc.CurrentPos(), "unexpected EOS")) + default: + return &charClass{ch} + } +} + +func parseClassSet(sc *scanner) class { + set := &setClass{false, []class{}} + if sc.Peek() == '^' { + set.IsNot = true + sc.Next() + } + isrange := false + for { + ch := sc.Peek() + switch ch { + case '[': + panic(newError(sc.CurrentPos(), "'[' can not be nested")) + case ']': + sc.Next() + goto exit + case EOS: + panic(newError(sc.CurrentPos(), "unexpected EOS")) + case '-': + if len(set.Classes) > 0 { + sc.Next() + isrange = true + continue + } + fallthrough + default: + set.Classes = append(set.Classes, parseClass(sc, false)) + } + if isrange { + begin := set.Classes[len(set.Classes)-2] + end := set.Classes[len(set.Classes)-1] + set.Classes = set.Classes[0 : len(set.Classes)-2] + set.Classes = append(set.Classes, &rangeClass{begin, end}) + isrange = false + } + } +exit: + if isrange { + set.Classes = append(set.Classes, &charClass{'-'}) + } + + return set +} + +func parsePattern(sc *scanner, toplevel 
bool) *seqPattern { + pat := &seqPattern{} + if toplevel { + if sc.Peek() == '^' { + sc.Next() + pat.MustHead = true + } + } + for { + ch := sc.Peek() + switch ch { + case '%': + sc.Save() + sc.Next() + switch sc.Peek() { + case '0': + panic(newError(sc.CurrentPos(), "invalid capture index")) + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + pat.Patterns = append(pat.Patterns, &numberPattern{sc.Next() - 48}) + case 'b': + sc.Next() + pat.Patterns = append(pat.Patterns, &bracePattern{sc.Next(), sc.Next()}) + default: + sc.Restore() + pat.Patterns = append(pat.Patterns, &singlePattern{parseClass(sc, true)}) + } + case '.', '[': + pat.Patterns = append(pat.Patterns, &singlePattern{parseClass(sc, true)}) + case ']': + panic(newError(sc.CurrentPos(), "invalid ']'")) + case ')': + if toplevel { + panic(newError(sc.CurrentPos(), "invalid ')'")) + } + return pat + case '(': + sc.Next() + if sc.Peek() == ')' { + sc.Next() + pat.Patterns = append(pat.Patterns, &posCapPattern{}) + } else { + ret := &capPattern{parsePattern(sc, false)} + if sc.Peek() != ')' { + panic(newError(sc.CurrentPos(), "unfinished capture")) + } + sc.Next() + pat.Patterns = append(pat.Patterns, ret) + } + case '*', '+', '-', '?': + sc.Next() + if len(pat.Patterns) > 0 { + spat, ok := pat.Patterns[len(pat.Patterns)-1].(*singlePattern) + if ok { + pat.Patterns = pat.Patterns[0 : len(pat.Patterns)-1] + pat.Patterns = append(pat.Patterns, &repeatPattern{ch, spat.Class}) + continue + } + } + pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}}) + case '$': + if toplevel && (sc.NextPos() == sc.Length()-1 || sc.NextPos() == EOS) { + pat.MustTail = true + } else { + pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}}) + } + sc.Next() + case EOS: + sc.Next() + goto exit + default: + sc.Next() + pat.Patterns = append(pat.Patterns, &singlePattern{&charClass{ch}}) + } + } +exit: + return pat +} + +type iptr struct { + insts []inst + capture int +} + +func compilePattern(p pattern, 
ps ...*iptr) []inst { + var ptr *iptr + toplevel := false + if len(ps) == 0 { + toplevel = true + ptr = &iptr{[]inst{inst{opSave, nil, 0, -1}}, 2} + } else { + ptr = ps[0] + } + switch pat := p.(type) { + case *singlePattern: + ptr.insts = append(ptr.insts, inst{opChar, pat.Class, -1, -1}) + case *seqPattern: + for _, cp := range pat.Patterns { + compilePattern(cp, ptr) + } + case *repeatPattern: + idx := len(ptr.insts) + switch pat.Type { + case '*': + ptr.insts = append(ptr.insts, + inst{opSplit, nil, idx + 1, idx + 3}, + inst{opChar, pat.Class, -1, -1}, + inst{opJmp, nil, idx, -1}) + case '+': + ptr.insts = append(ptr.insts, + inst{opChar, pat.Class, -1, -1}, + inst{opSplit, nil, idx, idx + 2}) + case '-': + ptr.insts = append(ptr.insts, + inst{opSplit, nil, idx + 3, idx + 1}, + inst{opChar, pat.Class, -1, -1}, + inst{opJmp, nil, idx, -1}) + case '?': + ptr.insts = append(ptr.insts, + inst{opSplit, nil, idx + 1, idx + 2}, + inst{opChar, pat.Class, -1, -1}) + } + case *posCapPattern: + ptr.insts = append(ptr.insts, inst{opPSave, nil, ptr.capture, -1}) + ptr.capture += 2 + case *capPattern: + c0, c1 := ptr.capture, ptr.capture+1 + ptr.capture += 2 + ptr.insts = append(ptr.insts, inst{opSave, nil, c0, -1}) + compilePattern(pat.Pattern, ptr) + ptr.insts = append(ptr.insts, inst{opSave, nil, c1, -1}) + case *bracePattern: + ptr.insts = append(ptr.insts, inst{opBrace, nil, pat.Begin, pat.End}) + case *numberPattern: + ptr.insts = append(ptr.insts, inst{opNumber, nil, pat.N, -1}) + } + if toplevel { + if p.(*seqPattern).MustTail { + ptr.insts = append(ptr.insts, inst{opSave, nil, 1, -1}, inst{opTailMatch, nil, -1, -1}) + } + ptr.insts = append(ptr.insts, inst{opSave, nil, 1, -1}, inst{opMatch, nil, -1, -1}) + } + return ptr.insts +} + +/* }}} parse */ + +/* VM {{{ */ + +// Simple recursive virtual machine based on the +// "Regular Expression Matching: the Virtual Machine Approach" (https://swtch.com/~rsc/regexp/regexp2.html) +func recursiveVM(src []byte, insts []inst, 
pc, sp int, ms ...*MatchData) (bool, int, *MatchData) { + var m *MatchData + if len(ms) == 0 { + m = newMatchState() + } else { + m = ms[0] + } +redo: + inst := insts[pc] + switch inst.OpCode { + case opChar: + if sp >= len(src) || !inst.Class.Matches(int(src[sp])) { + return false, sp, m + } + pc++ + sp++ + goto redo + case opMatch: + return true, sp, m + case opTailMatch: + return sp >= len(src), sp, m + case opJmp: + pc = inst.Operand1 + goto redo + case opSplit: + if ok, nsp, _ := recursiveVM(src, insts, inst.Operand1, sp, m); ok { + return true, nsp, m + } + pc = inst.Operand2 + goto redo + case opSave: + s := m.setCapture(inst.Operand1, sp) + if ok, nsp, _ := recursiveVM(src, insts, pc+1, sp, m); ok { + return true, nsp, m + } + m.restoreCapture(inst.Operand1, s) + return false, sp, m + case opPSave: + m.addPosCapture(inst.Operand1, sp+1) + pc++ + goto redo + case opBrace: + if sp >= len(src) || int(src[sp]) != inst.Operand1 { + return false, sp, m + } + count := 1 + for sp = sp + 1; sp < len(src); sp++ { + if int(src[sp]) == inst.Operand2 { + count-- + } + if count == 0 { + pc++ + sp++ + goto redo + } + if int(src[sp]) == inst.Operand1 { + count++ + } + } + return false, sp, m + case opNumber: + idx := inst.Operand1 * 2 + if idx >= m.CaptureLength()-1 { + panic(newError(_UNKNOWN, "invalid capture index")) + } + capture := src[m.Capture(idx):m.Capture(idx+1)] + for i := 0; i < len(capture); i++ { + if i+sp >= len(src) || capture[i] != src[i+sp] { + return false, sp, m + } + } + pc++ + sp += len(capture) + goto redo + } + panic("should not reach here") + return false, sp, m +} + +/* }}} */ + +/* API {{{ */ + +func Find(p string, src []byte, offset, limit int) (matches []*MatchData, err error) { + defer func() { + if v := recover(); v != nil { + if perr, ok := v.(*Error); ok { + err = perr + } else { + panic(v) + } + } + }() + pat := parsePattern(newScanner([]byte(p)), true) + insts := compilePattern(pat) + matches = []*MatchData{} + for sp := offset; sp <= 
len(src); { + ok, nsp, ms := recursiveVM(src, insts, 0, sp) + sp++ + if ok { + if sp < nsp { + sp = nsp + } + matches = append(matches, ms) + } + if len(matches) == limit || pat.MustHead { + break + } + } + return +} + +/* }}} */ diff --git a/vendor/github.com/yuin/gopher-lua/state.go b/vendor/github.com/yuin/gopher-lua/state.go new file mode 100644 index 00000000000..8e38851df71 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/state.go @@ -0,0 +1,1870 @@ +package lua + +//////////////////////////////////////////////////////// +// This file was generated by go-inline. DO NOT EDIT. // +//////////////////////////////////////////////////////// + +import ( + "fmt" + "github.com/yuin/gopher-lua/parse" + "golang.org/x/net/context" + "io" + "math" + "os" + "runtime" + "strings" + "sync/atomic" + "time" +) + +const MultRet = -1 +const RegistryIndex = -10000 +const EnvironIndex = -10001 +const GlobalsIndex = -10002 + +/* ApiError {{{ */ + +type ApiError struct { + Type ApiErrorType + Object LValue + StackTrace string + // Underlying error. 
This attribute is set only if the Type is ApiErrorFile or ApiErrorSyntax + Cause error +} + +func newApiError(code ApiErrorType, object LValue) *ApiError { + return &ApiError{code, object, "", nil} +} + +func newApiErrorS(code ApiErrorType, message string) *ApiError { + return newApiError(code, LString(message)) +} + +func newApiErrorE(code ApiErrorType, err error) *ApiError { + return &ApiError{code, LString(err.Error()), "", err} +} + +func (e *ApiError) Error() string { + if len(e.StackTrace) > 0 { + return fmt.Sprintf("%s\n%s", e.Object.String(), e.StackTrace) + } + return e.Object.String() +} + +type ApiErrorType int + +const ( + ApiErrorSyntax ApiErrorType = iota + ApiErrorFile + ApiErrorRun + ApiErrorError + ApiErrorPanic +) + +/* }}} */ + +/* ResumeState {{{ */ + +type ResumeState int + +const ( + ResumeOK ResumeState = iota + ResumeYield + ResumeError +) + +/* }}} */ + +/* P {{{ */ + +type P struct { + Fn LValue + NRet int + Protect bool + Handler *LFunction +} + +/* }}} */ + +/* Options {{{ */ + +// Options is a configuration that is used to create a new LState. +type Options struct { + // Call stack size. This defaults to `lua.CallStackSize`. + CallStackSize int + // Data stack size. This defaults to `lua.RegistrySize`. + RegistrySize int + // Controls whether or not libraries are opened by default + SkipOpenLibs bool + // Tells whether a Go stacktrace should be included in a Lua stacktrace when panics occur. 
+ IncludeGoStackTrace bool +} + +/* }}} */ + +/* Debug {{{ */ + +type Debug struct { + frame *callFrame + Name string + What string + Source string + CurrentLine int + NUpvalues int + LineDefined int + LastLineDefined int +} + +/* }}} */ + +/* callFrame {{{ */ + +type callFrame struct { + Idx int + Fn *LFunction + Parent *callFrame + Pc int + Base int + LocalBase int + ReturnBase int + NArgs int + NRet int + TailCall int +} + +type callFrameStack struct { + array []callFrame + sp int +} + +func newCallFrameStack(size int) *callFrameStack { + return &callFrameStack{ + array: make([]callFrame, size), + sp: 0, + } +} + +func (cs *callFrameStack) IsEmpty() bool { return cs.sp == 0 } + +func (cs *callFrameStack) Clear() { + cs.sp = 0 +} + +func (cs *callFrameStack) Push(v callFrame) { // +inline-start + cs.array[cs.sp] = v + cs.array[cs.sp].Idx = cs.sp + cs.sp++ +} // +inline-end + +func (cs *callFrameStack) Remove(sp int) { + psp := sp - 1 + nsp := sp + 1 + var pre *callFrame + var next *callFrame + if psp > 0 { + pre = &cs.array[psp] + } + if nsp < cs.sp { + next = &cs.array[nsp] + } + if next != nil { + next.Parent = pre + } + for i := sp; i+1 < cs.sp; i++ { + cs.array[i] = cs.array[i+1] + cs.array[i].Idx = i + cs.sp = i + } + cs.sp++ +} + +func (cs *callFrameStack) Sp() int { + return cs.sp +} + +func (cs *callFrameStack) SetSp(sp int) { + cs.sp = sp +} + +func (cs *callFrameStack) Last() *callFrame { + if cs.sp == 0 { + return nil + } + return &cs.array[cs.sp-1] +} + +func (cs *callFrameStack) At(sp int) *callFrame { + return &cs.array[sp] +} + +func (cs *callFrameStack) Pop() *callFrame { + cs.sp-- + return &cs.array[cs.sp] +} + +/* }}} */ + +/* registry {{{ */ + +type registry struct { + array []LValue + top int + alloc *allocator +} + +func newRegistry(size int, alloc *allocator) *registry { + return ®istry{make([]LValue, size), 0, alloc} +} + +func (rg *registry) SetTop(top int) { + oldtop := rg.top + rg.top = top + for i := oldtop; i < rg.top; i++ { + 
rg.array[i] = LNil + } + for i := rg.top; i < oldtop; i++ { + rg.array[i] = LNil + } +} + +func (rg *registry) Top() int { + return rg.top +} + +func (rg *registry) Push(v LValue) { + rg.array[rg.top] = v + rg.top++ +} + +func (rg *registry) Pop() LValue { + v := rg.array[rg.top-1] + rg.array[rg.top-1] = LNil + rg.top-- + return v +} + +func (rg *registry) Get(reg int) LValue { + return rg.array[reg] +} + +func (rg *registry) CopyRange(regv, start, limit, n int) { // +inline-start + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n +} // +inline-end + +func (rg *registry) FillNil(regm, n int) { // +inline-start + for i := 0; i < n; i++ { + rg.array[regm+i] = LNil + } + rg.top = regm + n +} // +inline-end + +func (rg *registry) Insert(value LValue, reg int) { + top := rg.Top() + if reg >= top { + rg.Set(reg, value) + return + } + top-- + for ; top >= reg; top-- { + rg.Set(top+1, rg.Get(top)) + } + rg.Set(reg, value) +} + +func (rg *registry) Set(reg int, val LValue) { + rg.array[reg] = val + if reg >= rg.top { + rg.top = reg + 1 + } +} + +func (rg *registry) SetNumber(reg int, val LNumber) { + rg.array[reg] = rg.alloc.LNumber2I(val) + if reg >= rg.top { + rg.top = reg + 1 + } +} /* }}} */ + +/* Global {{{ */ + +func newGlobal() *Global { + return &Global{ + MainThread: nil, + Registry: newLTable(0, 32), + Global: newLTable(0, 64), + builtinMts: make(map[int]LValue), + tempFiles: make([]*os.File, 0, 10), + } +} + +/* }}} */ + +/* package local methods {{{ */ + +func panicWithTraceback(L *LState) { + err := newApiError(ApiErrorRun, L.Get(-1)) + err.StackTrace = L.stackTrace(0) + panic(err) +} + +func panicWithoutTraceback(L *LState) { + err := newApiError(ApiErrorRun, L.Get(-1)) + panic(err) +} + +func newLState(options Options) *LState { + al := newAllocator(32) + ls := &LState{ + G: newGlobal(), + Parent: 
nil, + Panic: panicWithTraceback, + Dead: false, + Options: options, + + stop: 0, + reg: newRegistry(options.RegistrySize, al), + stack: newCallFrameStack(options.CallStackSize), + alloc: al, + currentFrame: nil, + wrapped: false, + uvcache: nil, + hasErrorFunc: false, + mainLoop: mainLoop, + ctx: nil, + } + ls.Env = ls.G.Global + return ls +} + +func (ls *LState) printReg() { + println("-------------------------") + println("thread:", ls) + println("top:", ls.reg.Top()) + if ls.currentFrame != nil { + println("function base:", ls.currentFrame.Base) + println("return base:", ls.currentFrame.ReturnBase) + } else { + println("(vm not started)") + } + println("local base:", ls.currentLocalBase()) + for i := 0; i < ls.reg.Top(); i++ { + println(i, ls.reg.Get(i).String()) + } + println("-------------------------") +} + +func (ls *LState) printCallStack() { + println("-------------------------") + for i := 0; i < ls.stack.Sp(); i++ { + print(i) + print(" ") + frame := ls.stack.At(i) + if frame == nil { + break + } + if frame.Fn.IsG { + println("IsG:", true, "Frame:", frame, "Fn:", frame.Fn) + } else { + println("IsG:", false, "Frame:", frame, "Fn:", frame.Fn, "pc:", frame.Pc) + } + } + println("-------------------------") +} + +func (ls *LState) closeAllUpvalues() { // +inline-start + for cf := ls.currentFrame; cf != nil; cf = cf.Parent { + if !cf.Fn.IsG { + ls.closeUpvalues(cf.LocalBase) + } + } +} // +inline-end + +func (ls *LState) raiseError(level int, format string, args ...interface{}) { + if !ls.hasErrorFunc { + ls.closeAllUpvalues() + } + message := format + if len(args) > 0 { + message = fmt.Sprintf(format, args...) 
+ } + if level > 0 { + message = fmt.Sprintf("%v %v", ls.where(level-1, true), message) + } + ls.reg.Push(LString(message)) + ls.Panic(ls) +} + +func (ls *LState) findLocal(frame *callFrame, no int) string { + fn := frame.Fn + if !fn.IsG { + if name, ok := fn.LocalName(no, frame.Pc-1); ok { + return name + } + } + var top int + if ls.currentFrame == frame { + top = ls.reg.Top() + } else if frame.Idx+1 < ls.stack.Sp() { + top = ls.stack.At(frame.Idx + 1).Base + } else { + return "" + } + if top-frame.LocalBase >= no { + return "(*temporary)" + } + return "" +} + +func (ls *LState) where(level int, skipg bool) string { + dbg, ok := ls.GetStack(level) + if !ok { + return "" + } + cf := dbg.frame + proto := cf.Fn.Proto + sourcename := "[G]" + if proto != nil { + sourcename = proto.SourceName + } else if skipg { + return ls.where(level+1, skipg) + } + line := "" + if proto != nil { + line = fmt.Sprintf("%v:", proto.DbgSourcePositions[cf.Pc-1]) + } + return fmt.Sprintf("%v:%v", sourcename, line) +} + +func (ls *LState) stackTrace(level int) string { + buf := []string{} + header := "stack traceback:" + if ls.currentFrame != nil { + i := 0 + for dbg, ok := ls.GetStack(i); ok; dbg, ok = ls.GetStack(i) { + cf := dbg.frame + buf = append(buf, fmt.Sprintf("\t%v in %v", ls.Where(i), ls.formattedFrameFuncName(cf))) + if !cf.Fn.IsG && cf.TailCall > 0 { + for tc := cf.TailCall; tc > 0; tc-- { + buf = append(buf, "\t(tailcall): ?") + i++ + } + } + i++ + } + } + buf = append(buf, fmt.Sprintf("\t%v: %v", "[G]", "?")) + buf = buf[intMax(0, intMin(level, len(buf))):len(buf)] + if len(buf) > 20 { + newbuf := make([]string, 0, 20) + newbuf = append(newbuf, buf[0:7]...) + newbuf = append(newbuf, "\t...") + newbuf = append(newbuf, buf[len(buf)-7:len(buf)]...) 
+ buf = newbuf + } + return fmt.Sprintf("%s\n%s", header, strings.Join(buf, "\n")) +} + +func (ls *LState) formattedFrameFuncName(fr *callFrame) string { + name, ischunk := ls.frameFuncName(fr) + if ischunk { + return name + } + if name[0] != '(' && name[0] != '<' { + return fmt.Sprintf("function '%s'", name) + } + return fmt.Sprintf("function %s", name) +} + +func (ls *LState) rawFrameFuncName(fr *callFrame) string { + name, _ := ls.frameFuncName(fr) + return name +} + +func (ls *LState) frameFuncName(fr *callFrame) (string, bool) { + frame := fr.Parent + if frame == nil { + if ls.Parent == nil { + return "main chunk", true + } else { + return "corountine", true + } + } + if !frame.Fn.IsG { + pc := frame.Pc - 1 + for _, call := range frame.Fn.Proto.DbgCalls { + if call.Pc == pc { + name := call.Name + if (name == "?" || fr.TailCall > 0) && !fr.Fn.IsG { + name = fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined) + } + return name, false + } + } + } + if !fr.Fn.IsG { + return fmt.Sprintf("<%v:%v>", fr.Fn.Proto.SourceName, fr.Fn.Proto.LineDefined), false + } + return "(anonymous)", false +} + +func (ls *LState) isStarted() bool { + return ls.currentFrame != nil +} + +func (ls *LState) kill() { + ls.Dead = true +} + +func (ls *LState) indexToReg(idx int) int { + base := ls.currentLocalBase() + if idx > 0 { + return base + idx - 1 + } else if idx == 0 { + return -1 + } else { + tidx := ls.reg.Top() + idx + if tidx < base { + return -1 + } + return tidx + } +} + +func (ls *LState) currentLocalBase() int { + base := 0 + if ls.currentFrame != nil { + base = ls.currentFrame.LocalBase + } + return base +} + +func (ls *LState) currentEnv() *LTable { + return ls.Env + /* + if ls.currentFrame == nil { + return ls.Env + } + return ls.currentFrame.Fn.Env + */ +} + +func (ls *LState) rkValue(idx int) LValue { + /* + if OpIsK(idx) { + return ls.currentFrame.Fn.Proto.Constants[opIndexK(idx)] + } + return ls.reg.Get(ls.currentFrame.LocalBase + idx) + */ + if 
(idx & opBitRk) != 0 { + return ls.currentFrame.Fn.Proto.Constants[idx & ^opBitRk] + } + return ls.reg.array[ls.currentFrame.LocalBase+idx] +} + +func (ls *LState) rkString(idx int) string { + if (idx & opBitRk) != 0 { + return ls.currentFrame.Fn.Proto.stringConstants[idx & ^opBitRk] + } + return string(ls.reg.array[ls.currentFrame.LocalBase+idx].(LString)) +} + +func (ls *LState) closeUpvalues(idx int) { // +inline-start + if ls.uvcache != nil { + var prev *Upvalue + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index >= idx { + if prev != nil { + prev.next = nil + } else { + ls.uvcache = nil + } + uv.Close() + } + prev = uv + } + } +} // +inline-end + +func (ls *LState) findUpvalue(idx int) *Upvalue { + var prev *Upvalue + var next *Upvalue + if ls.uvcache != nil { + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index == idx { + return uv + } + if uv.index > idx { + next = uv + break + } + prev = uv + } + } + uv := &Upvalue{reg: ls.reg, index: idx, closed: false} + if prev != nil { + prev.next = uv + } else { + ls.uvcache = uv + } + if next != nil { + uv.next = next + } + return uv +} + +func (ls *LState) metatable(lvalue LValue, rawget bool) LValue { + var metatable LValue = LNil + switch obj := lvalue.(type) { + case *LTable: + metatable = obj.Metatable + case *LUserData: + metatable = obj.Metatable + default: + if table, ok := ls.G.builtinMts[int(obj.Type())]; ok { + metatable = table + } + } + + if !rawget && metatable != LNil { + oldmt := metatable + if tb, ok := metatable.(*LTable); ok { + metatable = tb.RawGetString("__metatable") + if metatable == LNil { + metatable = oldmt + } + } + } + + return metatable +} + +func (ls *LState) metaOp1(lvalue LValue, event string) LValue { + if mt := ls.metatable(lvalue, true); mt != LNil { + if tb, ok := mt.(*LTable); ok { + return tb.RawGetString(event) + } + } + return LNil +} + +func (ls *LState) metaOp2(value1, value2 LValue, event string) LValue { + if mt := ls.metatable(value1, true); mt != 
LNil { + if tb, ok := mt.(*LTable); ok { + if ret := tb.RawGetString(event); ret != LNil { + return ret + } + } + } + if mt := ls.metatable(value2, true); mt != LNil { + if tb, ok := mt.(*LTable); ok { + return tb.RawGetString(event) + } + } + return LNil +} + +func (ls *LState) metaCall(lvalue LValue) (*LFunction, bool) { + if fn, ok := lvalue.(*LFunction); ok { + return fn, false + } + if fn, ok := ls.metaOp1(lvalue, "__call").(*LFunction); ok { + return fn, true + } + return nil, false +} + +func (ls *LState) initCallFrame(cf *callFrame) { // +inline-start + if cf.Fn.IsG { + ls.reg.SetTop(cf.LocalBase + cf.NArgs) + } else { + proto := cf.Fn.Proto + nargs := cf.NArgs + np := int(proto.NumParameters) + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + nargs = np + } + + if (proto.IsVarArg & VarArgIsVarArg) == 0 { + if nargs < int(proto.NumUsedRegisters) { + nargs = int(proto.NumUsedRegisters) + } + for i := np; i < nargs; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } + ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters) + } else { + /* swap vararg positions: + closure + namedparam1 <- lbase + namedparam2 + vararg1 + vararg2 + + TO + + closure + nil + nil + vararg1 + vararg2 + namedparam1 <- lbase + namedparam2 + */ + nvarargs := nargs - np + if nvarargs < 0 { + nvarargs = 0 + } + + ls.reg.SetTop(cf.LocalBase + nargs + np) + for i := 0; i < np; i++ { + //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i)) + ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i] + //ls.reg.Set(cf.LocalBase+i, LNil) + ls.reg.array[cf.LocalBase+i] = LNil + } + + if CompatVarArg { + ls.reg.SetTop(cf.LocalBase + nargs + np + 1) + if (proto.IsVarArg & VarArgNeedsArg) != 0 { + argtb := newLTable(nvarargs, 0) + for i := 0; i < nvarargs; i++ { + argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i)) + } + argtb.RawSetString("n", LNumber(nvarargs)) + //ls.reg.Set(cf.LocalBase+nargs+np, argtb) + ls.reg.array[cf.LocalBase+nargs+np] = argtb + } else { 
+ ls.reg.array[cf.LocalBase+nargs+np] = LNil + } + } + cf.LocalBase += nargs + maxreg := cf.LocalBase + int(proto.NumUsedRegisters) + ls.reg.SetTop(maxreg) + } + } +} // +inline-end + +func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) { // +inline-start + if meta { + cf.NArgs++ + ls.reg.Insert(fn, cf.LocalBase) + } + if cf.Fn == nil { + ls.RaiseError("attempt to call a non-function object") + } + if ls.stack.sp == ls.Options.CallStackSize { + ls.RaiseError("stack overflow") + } + // this section is inlined by go-inline + // source function is 'func (cs *callFrameStack) Push(v callFrame) ' in '_state.go' + { + cs := ls.stack + v := cf + cs.array[cs.sp] = v + cs.array[cs.sp].Idx = cs.sp + cs.sp++ + } + newcf := ls.stack.Last() + // this section is inlined by go-inline + // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go' + { + cf := newcf + if cf.Fn.IsG { + ls.reg.SetTop(cf.LocalBase + cf.NArgs) + } else { + proto := cf.Fn.Proto + nargs := cf.NArgs + np := int(proto.NumParameters) + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + nargs = np + } + + if (proto.IsVarArg & VarArgIsVarArg) == 0 { + if nargs < int(proto.NumUsedRegisters) { + nargs = int(proto.NumUsedRegisters) + } + for i := np; i < nargs; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } + ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters) + } else { + /* swap vararg positions: + closure + namedparam1 <- lbase + namedparam2 + vararg1 + vararg2 + + TO + + closure + nil + nil + vararg1 + vararg2 + namedparam1 <- lbase + namedparam2 + */ + nvarargs := nargs - np + if nvarargs < 0 { + nvarargs = 0 + } + + ls.reg.SetTop(cf.LocalBase + nargs + np) + for i := 0; i < np; i++ { + //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i)) + ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i] + //ls.reg.Set(cf.LocalBase+i, LNil) + ls.reg.array[cf.LocalBase+i] = LNil + } + + if CompatVarArg { + ls.reg.SetTop(cf.LocalBase + 
nargs + np + 1) + if (proto.IsVarArg & VarArgNeedsArg) != 0 { + argtb := newLTable(nvarargs, 0) + for i := 0; i < nvarargs; i++ { + argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i)) + } + argtb.RawSetString("n", LNumber(nvarargs)) + //ls.reg.Set(cf.LocalBase+nargs+np, argtb) + ls.reg.array[cf.LocalBase+nargs+np] = argtb + } else { + ls.reg.array[cf.LocalBase+nargs+np] = LNil + } + } + cf.LocalBase += nargs + maxreg := cf.LocalBase + int(proto.NumUsedRegisters) + ls.reg.SetTop(maxreg) + } + } + } + ls.currentFrame = newcf +} // +inline-end + +func (ls *LState) callR(nargs, nret, rbase int) { + base := ls.reg.Top() - nargs - 1 + if rbase < 0 { + rbase = base + } + lv := ls.reg.Get(base) + fn, meta := ls.metaCall(lv) + ls.pushCallFrame(callFrame{ + Fn: fn, + Pc: 0, + Base: base, + LocalBase: base + 1, + ReturnBase: rbase, + NArgs: nargs, + NRet: nret, + Parent: ls.currentFrame, + TailCall: 0, + }, lv, meta) + if ls.G.MainThread == nil { + ls.G.MainThread = ls + ls.G.CurrentThread = ls + ls.mainLoop(ls, nil) + } else { + ls.mainLoop(ls, ls.currentFrame) + } + if nret != MultRet { + ls.reg.SetTop(rbase + nret) + } +} + +func (ls *LState) getField(obj LValue, key LValue) LValue { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + ret := tb.RawGet(key) + if ret != LNil { + return ret + } + } + metaindex := ls.metaOp1(curobj, "__index") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + return LNil + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(key) + ls.Call(2, 1) + return ls.reg.Pop() + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in gettable") + return nil +} + +func (ls *LState) getFieldString(obj LValue, key string) LValue { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + ret := 
tb.RawGetString(key) + if ret != LNil { + return ret + } + } + metaindex := ls.metaOp1(curobj, "__index") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + return LNil + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(LString(key)) + ls.Call(2, 1) + return ls.reg.Pop() + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in gettable") + return nil +} + +func (ls *LState) setField(obj LValue, key LValue, value LValue) { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + if tb.RawGet(key) != LNil { + ls.RawSet(tb, key, value) + return + } + } + metaindex := ls.metaOp1(curobj, "__newindex") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + ls.RawSet(tb, key, value) + return + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(key) + ls.reg.Push(value) + ls.Call(3, 0) + return + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in settable") +} + +func (ls *LState) setFieldString(obj LValue, key string, value LValue) { + curobj := obj + for i := 0; i < MaxTableGetLoop; i++ { + tb, istable := curobj.(*LTable) + if istable { + if tb.RawGetString(key) != LNil { + tb.RawSetString(key, value) + return + } + } + metaindex := ls.metaOp1(curobj, "__newindex") + if metaindex == LNil { + if !istable { + ls.RaiseError("attempt to index a non-table object(%v)", curobj.Type().String()) + } + tb.RawSetString(key, value) + return + } + if metaindex.Type() == LTFunction { + ls.reg.Push(metaindex) + ls.reg.Push(curobj) + ls.reg.Push(LString(key)) + ls.reg.Push(value) + ls.Call(3, 0) + return + } else { + curobj = metaindex + } + } + ls.RaiseError("too many recursions in settable") +} + +/* }}} */ + +/* api methods {{{ */ + 
+func NewState(opts ...Options) *LState { + var ls *LState + if len(opts) == 0 { + ls = newLState(Options{ + CallStackSize: CallStackSize, + RegistrySize: RegistrySize, + }) + ls.OpenLibs() + } else { + if opts[0].CallStackSize < 1 { + opts[0].CallStackSize = CallStackSize + } + if opts[0].RegistrySize < 128 { + opts[0].RegistrySize = RegistrySize + } + ls = newLState(opts[0]) + if !opts[0].SkipOpenLibs { + ls.OpenLibs() + } + } + return ls +} + +func (ls *LState) Close() { + atomic.AddInt32(&ls.stop, 1) + for _, file := range ls.G.tempFiles { + // ignore errors in these operations + file.Close() + os.Remove(file.Name()) + } +} + +/* registry operations {{{ */ + +func (ls *LState) GetTop() int { + return ls.reg.Top() - ls.currentLocalBase() +} + +func (ls *LState) SetTop(idx int) { + base := ls.currentLocalBase() + newtop := ls.indexToReg(idx) + 1 + if newtop < base { + ls.reg.SetTop(base) + } else { + ls.reg.SetTop(newtop) + } +} + +func (ls *LState) Replace(idx int, value LValue) { + base := ls.currentLocalBase() + if idx > 0 { + reg := base + idx - 1 + if reg < ls.reg.Top() { + ls.reg.Set(reg, value) + } + } else if idx == 0 { + } else if idx > RegistryIndex { + if tidx := ls.reg.Top() + idx; tidx >= base { + ls.reg.Set(tidx, value) + } + } else { + switch idx { + case RegistryIndex: + if tb, ok := value.(*LTable); ok { + ls.G.Registry = tb + } else { + ls.RaiseError("registry must be a table(%v)", value.Type().String()) + } + case EnvironIndex: + if ls.currentFrame == nil { + ls.RaiseError("no calling environment") + } + if tb, ok := value.(*LTable); ok { + ls.currentFrame.Fn.Env = tb + } else { + ls.RaiseError("environment must be a table(%v)", value.Type().String()) + } + case GlobalsIndex: + if tb, ok := value.(*LTable); ok { + ls.G.Global = tb + } else { + ls.RaiseError("_G must be a table(%v)", value.Type().String()) + } + default: + fn := ls.currentFrame.Fn + index := GlobalsIndex - idx - 1 + if index < len(fn.Upvalues) { + 
fn.Upvalues[index].SetValue(value) + } + } + } +} + +func (ls *LState) Get(idx int) LValue { + base := ls.currentLocalBase() + if idx > 0 { + reg := base + idx - 1 + if reg < ls.reg.Top() { + return ls.reg.Get(reg) + } + return LNil + } else if idx == 0 { + return LNil + } else if idx > RegistryIndex { + tidx := ls.reg.Top() + idx + if tidx < base { + return LNil + } + return ls.reg.Get(tidx) + } else { + switch idx { + case RegistryIndex: + return ls.G.Registry + case EnvironIndex: + if ls.currentFrame == nil { + return ls.Env + } + return ls.currentFrame.Fn.Env + case GlobalsIndex: + return ls.G.Global + default: + fn := ls.currentFrame.Fn + index := GlobalsIndex - idx - 1 + if index < len(fn.Upvalues) { + return fn.Upvalues[index].Value() + } + return LNil + } + } + return LNil +} + +func (ls *LState) Push(value LValue) { + ls.reg.Push(value) +} + +func (ls *LState) Pop(n int) { + for i := 0; i < n; i++ { + if ls.GetTop() == 0 { + ls.RaiseError("register underflow") + } + ls.reg.Pop() + } +} + +func (ls *LState) Insert(value LValue, index int) { + reg := ls.indexToReg(index) + top := ls.reg.Top() + if reg >= top { + ls.reg.Set(reg, value) + return + } + if reg <= ls.currentLocalBase() { + reg = ls.currentLocalBase() + } + top-- + for ; top >= reg; top-- { + ls.reg.Set(top+1, ls.reg.Get(top)) + } + ls.reg.Set(reg, value) +} + +func (ls *LState) Remove(index int) { + reg := ls.indexToReg(index) + top := ls.reg.Top() + switch { + case reg >= top: + return + case reg < ls.currentLocalBase(): + return + case reg == top-1: + ls.Pop(1) + return + } + for i := reg; i < top-1; i++ { + ls.reg.Set(i, ls.reg.Get(i+1)) + } + ls.reg.SetTop(top - 1) +} + +/* }}} */ + +/* object allocation {{{ */ + +func (ls *LState) NewTable() *LTable { + return newLTable(defaultArrayCap, defaultHashCap) +} + +func (ls *LState) CreateTable(acap, hcap int) *LTable { + return newLTable(acap, hcap) +} + +// NewThread returns a new LState that shares with the original state all global objects. 
+// If the original state has context.Context, the new state has a new child context of the original state and this function returns its cancel function. +func (ls *LState) NewThread() (*LState, context.CancelFunc) { + thread := newLState(ls.Options) + thread.G = ls.G + thread.Env = ls.Env + var f context.CancelFunc = nil + if ls.ctx != nil { + thread.mainLoop = mainLoopWithContext + thread.ctx, f = context.WithCancel(ls.ctx) + } + return thread, f +} + +func (ls *LState) NewUserData() *LUserData { + return &LUserData{ + Env: ls.currentEnv(), + Metatable: LNil, + } +} + +func (ls *LState) NewFunction(fn LGFunction) *LFunction { + return newLFunctionG(fn, ls.currentEnv(), 0) +} + +func (ls *LState) NewClosure(fn LGFunction, upvalues ...LValue) *LFunction { + cl := newLFunctionG(fn, ls.currentEnv(), len(upvalues)) + for i, lv := range upvalues { + cl.Upvalues[i] = &Upvalue{} + cl.Upvalues[i].Close() + cl.Upvalues[i].SetValue(lv) + } + return cl +} + +/* }}} */ + +/* toType {{{ */ + +func (ls *LState) ToBool(n int) bool { + return LVAsBool(ls.Get(n)) +} + +func (ls *LState) ToInt(n int) int { + if lv, ok := ls.Get(n).(LNumber); ok { + return int(lv) + } + if lv, ok := ls.Get(n).(LString); ok { + if num, err := parseNumber(string(lv)); err == nil { + return int(num) + } + } + return 0 +} + +func (ls *LState) ToInt64(n int) int64 { + if lv, ok := ls.Get(n).(LNumber); ok { + return int64(lv) + } + if lv, ok := ls.Get(n).(LString); ok { + if num, err := parseNumber(string(lv)); err == nil { + return int64(num) + } + } + return 0 +} + +func (ls *LState) ToNumber(n int) LNumber { + return LVAsNumber(ls.Get(n)) +} + +func (ls *LState) ToString(n int) string { + return LVAsString(ls.Get(n)) +} + +func (ls *LState) ToTable(n int) *LTable { + if lv, ok := ls.Get(n).(*LTable); ok { + return lv + } + return nil +} + +func (ls *LState) ToFunction(n int) *LFunction { + if lv, ok := ls.Get(n).(*LFunction); ok { + return lv + } + return nil +} + +func (ls *LState) ToUserData(n int) 
*LUserData { + if lv, ok := ls.Get(n).(*LUserData); ok { + return lv + } + return nil +} + +func (ls *LState) ToThread(n int) *LState { + if lv, ok := ls.Get(n).(*LState); ok { + return lv + } + return nil +} + +/* }}} */ + +/* error & debug operations {{{ */ + +// This function is equivalent to luaL_error( http://www.lua.org/manual/5.1/manual.html#luaL_error ). +func (ls *LState) RaiseError(format string, args ...interface{}) { + ls.raiseError(1, format, args...) +} + +// This function is equivalent to lua_error( http://www.lua.org/manual/5.1/manual.html#lua_error ). +func (ls *LState) Error(lv LValue, level int) { + if str, ok := lv.(LString); ok { + ls.raiseError(level, string(str)) + } else { + if !ls.hasErrorFunc { + ls.closeAllUpvalues() + } + ls.Push(lv) + ls.Panic(ls) + } +} + +func (ls *LState) GetInfo(what string, dbg *Debug, fn LValue) (LValue, error) { + if !strings.HasPrefix(what, ">") { + fn = dbg.frame.Fn + } else { + what = what[1:] + } + f, ok := fn.(*LFunction) + if !ok { + return LNil, newApiErrorS(ApiErrorRun, "can not get debug info(an object in not a function)") + } + + retfn := false + for _, c := range what { + switch c { + case 'f': + retfn = true + case 'S': + if dbg.frame != nil && dbg.frame.Parent == nil { + dbg.What = "main" + } else if f.IsG { + dbg.What = "G" + } else if dbg.frame != nil && dbg.frame.TailCall > 0 { + dbg.What = "tail" + } else { + dbg.What = "Lua" + } + if !f.IsG { + dbg.Source = f.Proto.SourceName + dbg.LineDefined = f.Proto.LineDefined + dbg.LastLineDefined = f.Proto.LastLineDefined + } + case 'l': + if !f.IsG && dbg.frame != nil { + if dbg.frame.Pc > 0 { + dbg.CurrentLine = f.Proto.DbgSourcePositions[dbg.frame.Pc-1] + } + } else { + dbg.CurrentLine = -1 + } + case 'u': + dbg.NUpvalues = len(f.Upvalues) + case 'n': + if dbg.frame != nil { + dbg.Name = ls.rawFrameFuncName(dbg.frame) + } + default: + return LNil, newApiErrorS(ApiErrorRun, "invalid what: "+string(c)) + } + } + + if retfn { + return f, nil + } + return 
LNil, nil + +} + +func (ls *LState) GetStack(level int) (*Debug, bool) { + frame := ls.currentFrame + for ; level > 0 && frame != nil; frame = frame.Parent { + level-- + if !frame.Fn.IsG { + level -= frame.TailCall + } + } + + if level == 0 && frame != nil { + return &Debug{frame: frame}, true + } else if level < 0 && ls.stack.Sp() > 0 { + return &Debug{frame: ls.stack.At(0)}, true + } + return &Debug{}, false +} + +func (ls *LState) GetLocal(dbg *Debug, no int) (string, LValue) { + frame := dbg.frame + if name := ls.findLocal(frame, no); len(name) > 0 { + return name, ls.reg.Get(frame.LocalBase + no - 1) + } + return "", LNil +} + +func (ls *LState) SetLocal(dbg *Debug, no int, lv LValue) string { + frame := dbg.frame + if name := ls.findLocal(frame, no); len(name) > 0 { + ls.reg.Set(frame.LocalBase+no-1, lv) + return name + } + return "" +} + +func (ls *LState) GetUpvalue(fn *LFunction, no int) (string, LValue) { + if fn.IsG { + return "", LNil + } + + no-- + if no >= 0 && no < len(fn.Upvalues) { + return fn.Proto.DbgUpvalues[no], fn.Upvalues[no].Value() + } + return "", LNil +} + +func (ls *LState) SetUpvalue(fn *LFunction, no int, lv LValue) string { + if fn.IsG { + return "" + } + + no-- + if no >= 0 && no < len(fn.Upvalues) { + fn.Upvalues[no].SetValue(lv) + return fn.Proto.DbgUpvalues[no] + } + return "" +} + +/* }}} */ + +/* env operations {{{ */ + +func (ls *LState) GetFEnv(obj LValue) LValue { + switch lv := obj.(type) { + case *LFunction: + return lv.Env + case *LUserData: + return lv.Env + case *LState: + return lv.Env + } + return LNil +} + +func (ls *LState) SetFEnv(obj LValue, env LValue) { + tb, ok := env.(*LTable) + if !ok { + ls.RaiseError("cannot use %v as an environment", env.Type().String()) + } + + switch lv := obj.(type) { + case *LFunction: + lv.Env = tb + case *LUserData: + lv.Env = tb + case *LState: + lv.Env = tb + } + /* do nothing */ +} + +/* }}} */ + +/* table operations {{{ */ + +func (ls *LState) RawGet(tb *LTable, key LValue) LValue 
{ + return tb.RawGet(key) +} + +func (ls *LState) RawGetInt(tb *LTable, key int) LValue { + return tb.RawGetInt(key) +} + +func (ls *LState) GetField(obj LValue, skey string) LValue { + return ls.getFieldString(obj, skey) +} + +func (ls *LState) GetTable(obj LValue, key LValue) LValue { + return ls.getField(obj, key) +} + +func (ls *LState) RawSet(tb *LTable, key LValue, value LValue) { + if n, ok := key.(LNumber); ok && math.IsNaN(float64(n)) { + ls.RaiseError("table index is NaN") + } else if key == LNil { + ls.RaiseError("table index is nil") + } + tb.RawSet(key, value) +} + +func (ls *LState) RawSetInt(tb *LTable, key int, value LValue) { + tb.RawSetInt(key, value) +} + +func (ls *LState) SetField(obj LValue, key string, value LValue) { + ls.setFieldString(obj, key, value) +} + +func (ls *LState) SetTable(obj LValue, key LValue, value LValue) { + ls.setField(obj, key, value) +} + +func (ls *LState) ForEach(tb *LTable, cb func(LValue, LValue)) { + tb.ForEach(cb) +} + +func (ls *LState) GetGlobal(name string) LValue { + return ls.GetField(ls.Get(GlobalsIndex), name) +} + +func (ls *LState) SetGlobal(name string, value LValue) { + ls.SetField(ls.Get(GlobalsIndex), name, value) +} + +func (ls *LState) Next(tb *LTable, key LValue) (LValue, LValue) { + return tb.Next(key) +} + +/* }}} */ + +/* unary operations {{{ */ + +func (ls *LState) ObjLen(v1 LValue) int { + if v1.Type() == LTString { + return len(string(v1.(LString))) + } + op := ls.metaOp1(v1, "__len") + if op.Type() == LTFunction { + ls.Push(op) + ls.Push(v1) + ls.Call(1, 1) + ret := ls.reg.Pop() + if ret.Type() == LTNumber { + return int(ret.(LNumber)) + } + } else if v1.Type() == LTTable { + return v1.(*LTable).Len() + } + return 0 +} + +/* }}} */ + +/* binary operations {{{ */ + +func (ls *LState) Concat(values ...LValue) string { + top := ls.reg.Top() + for _, value := range values { + ls.reg.Push(value) + } + ret := stringConcat(ls, len(values), ls.reg.Top()-1) + ls.reg.SetTop(top) + return 
LVAsString(ret) +} + +func (ls *LState) LessThan(lhs, rhs LValue) bool { + return lessThan(ls, lhs, rhs) +} + +func (ls *LState) Equal(lhs, rhs LValue) bool { + return equals(ls, lhs, rhs, false) +} + +func (ls *LState) RawEqual(lhs, rhs LValue) bool { + return equals(ls, lhs, rhs, true) +} + +/* }}} */ + +/* register operations {{{ */ + +func (ls *LState) Register(name string, fn LGFunction) { + ls.SetGlobal(name, ls.NewFunction(fn)) +} + +/* }}} */ + +/* load and function call operations {{{ */ + +func (ls *LState) Load(reader io.Reader, name string) (*LFunction, error) { + chunk, err := parse.Parse(reader, name) + if err != nil { + return nil, newApiErrorE(ApiErrorSyntax, err) + } + proto, err := Compile(chunk, name) + if err != nil { + return nil, newApiErrorE(ApiErrorSyntax, err) + } + return newLFunctionL(proto, ls.currentEnv(), 0), nil +} + +func (ls *LState) Call(nargs, nret int) { + ls.callR(nargs, nret, -1) +} + +func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) { + err = nil + sp := ls.stack.Sp() + base := ls.reg.Top() - nargs - 1 + oldpanic := ls.Panic + ls.Panic = panicWithoutTraceback + if errfunc != nil { + ls.hasErrorFunc = true + } + defer func() { + ls.Panic = oldpanic + ls.hasErrorFunc = false + rcv := recover() + if rcv != nil { + if _, ok := rcv.(*ApiError); !ok { + err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv)) + if ls.Options.IncludeGoStackTrace { + buf := make([]byte, 4096) + runtime.Stack(buf, false) + err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + "\n" + ls.stackTrace(0) + } + } else { + err = rcv.(*ApiError) + } + if errfunc != nil { + ls.Push(errfunc) + ls.Push(err.(*ApiError).Object) + ls.Panic = panicWithoutTraceback + defer func() { + ls.Panic = oldpanic + rcv := recover() + if rcv != nil { + if _, ok := rcv.(*ApiError); !ok { + err = newApiErrorS(ApiErrorPanic, fmt.Sprint(rcv)) + if ls.Options.IncludeGoStackTrace { + buf := make([]byte, 4096) + runtime.Stack(buf, false) + 
err.(*ApiError).StackTrace = strings.Trim(string(buf), "\000") + "\n" + ls.stackTrace(0) + } + } else { + err = rcv.(*ApiError) + err.(*ApiError).StackTrace = ls.stackTrace(0) + } + } + }() + ls.Call(1, 1) + err = newApiError(ApiErrorError, ls.Get(-1)) + } else if len(err.(*ApiError).StackTrace) == 0 { + err.(*ApiError).StackTrace = ls.stackTrace(0) + } + ls.reg.SetTop(base) + } + ls.stack.SetSp(sp) + if sp == 0 { + ls.currentFrame = nil + } + }() + + ls.Call(nargs, nret) + + return +} + +func (ls *LState) GPCall(fn LGFunction, data LValue) error { + ls.Push(newLFunctionG(fn, ls.currentEnv(), 0)) + ls.Push(data) + return ls.PCall(1, MultRet, nil) +} + +func (ls *LState) CallByParam(cp P, args ...LValue) error { + ls.Push(cp.Fn) + for _, arg := range args { + ls.Push(arg) + } + + if cp.Protect { + return ls.PCall(len(args), cp.NRet, cp.Handler) + } + ls.Call(len(args), cp.NRet) + return nil +} + +/* }}} */ + +/* metatable operations {{{ */ + +func (ls *LState) GetMetatable(obj LValue) LValue { + return ls.metatable(obj, false) +} + +func (ls *LState) SetMetatable(obj LValue, mt LValue) { + switch mt.(type) { + case *LNilType, *LTable: + default: + ls.RaiseError("metatable must be a table or nil, but got %v", mt.Type().String()) + } + + switch v := obj.(type) { + case *LTable: + v.Metatable = mt + case *LUserData: + v.Metatable = mt + default: + ls.G.builtinMts[int(obj.Type())] = mt + } +} + +/* }}} */ + +/* coroutine operations {{{ */ + +func (ls *LState) Status(th *LState) string { + status := "suspended" + if th.Dead { + status = "dead" + } else if ls.G.CurrentThread == th { + status = "running" + } else if ls.Parent == th { + status = "normal" + } + return status +} + +func (ls *LState) Resume(th *LState, fn *LFunction, args ...LValue) (ResumeState, error, []LValue) { + isstarted := th.isStarted() + if !isstarted { + base := 0 + th.stack.Push(callFrame{ + Fn: fn, + Pc: 0, + Base: base, + LocalBase: base + 1, + ReturnBase: base, + NArgs: 0, + NRet: MultRet, + Parent: 
nil, + TailCall: 0, + }) + } + + if ls.G.CurrentThread == th { + return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a running thread"), nil + } + if th.Dead { + return ResumeError, newApiErrorS(ApiErrorRun, "can not resume a dead thread"), nil + } + th.Parent = ls + ls.G.CurrentThread = th + if !isstarted { + cf := th.stack.Last() + th.currentFrame = cf + th.SetTop(0) + for _, arg := range args { + th.Push(arg) + } + cf.NArgs = len(args) + th.initCallFrame(cf) + th.Panic = panicWithoutTraceback + } else { + for _, arg := range args { + th.Push(arg) + } + } + top := ls.GetTop() + threadRun(th) + haserror := LVIsFalse(ls.Get(top + 1)) + ret := make([]LValue, 0, ls.GetTop()) + for idx := top + 2; idx <= ls.GetTop(); idx++ { + ret = append(ret, ls.Get(idx)) + } + if len(ret) == 0 { + ret = append(ret, LNil) + } + ls.SetTop(top) + + if haserror { + return ResumeError, newApiError(ApiErrorRun, ret[0]), nil + } else if th.stack.IsEmpty() { + return ResumeOK, nil, ret + } + return ResumeYield, nil, ret +} + +func (ls *LState) Yield(values ...LValue) int { + ls.SetTop(0) + for _, lv := range values { + ls.Push(lv) + } + return -1 +} + +func (ls *LState) XMoveTo(other *LState, n int) { + if ls == other { + return + } + top := ls.GetTop() + n = intMin(n, top) + for i := n; i > 0; i-- { + other.Push(ls.Get(top - i + 1)) + } + ls.SetTop(top - n) +} + +/* }}} */ + +/* GopherLua original APIs {{{ */ + +// Set maximum memory size. This function can only be called from the main thread. +func (ls *LState) SetMx(mx int) { + if ls.Parent != nil { + ls.RaiseError("sub threads are not allowed to set a memory limit") + } + go func() { + limit := uint64(mx * 1024 * 1024) //MB + var s runtime.MemStats + for ls.stop == 0 { + runtime.ReadMemStats(&s) + if s.Alloc >= limit { + fmt.Println("out of memory") + os.Exit(3) + } + time.Sleep(100 * time.Millisecond) + } + }() +} + +// SetContext set a context ctx to this LState. The provided ctx must be non-nil. 
+func (ls *LState) SetContext(ctx context.Context) { + ls.mainLoop = mainLoopWithContext + ls.ctx = ctx +} + +// Context returns the LState's context. To change the context, use WithContext. +func (ls *LState) Context() context.Context { + return ls.ctx +} + +// RemoveContext removes the context associated with this LState and returns this context. +func (ls *LState) RemoveContext() context.Context { + oldctx := ls.ctx + ls.mainLoop = mainLoop + ls.ctx = nil + return oldctx +} + +// Converts the Lua value at the given acceptable index to the chan LValue. +func (ls *LState) ToChannel(n int) chan LValue { + if lv, ok := ls.Get(n).(LChannel); ok { + return (chan LValue)(lv) + } + return nil +} + +/* }}} */ + +/* }}} */ + +// diff --git a/vendor/github.com/yuin/gopher-lua/stringlib.go b/vendor/github.com/yuin/gopher-lua/stringlib.go new file mode 100644 index 00000000000..59de78f58e9 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/stringlib.go @@ -0,0 +1,442 @@ +package lua + +import ( + "fmt" + "strings" + + "github.com/yuin/gopher-lua/pm" +) + +func OpenString(L *LState) int { + var mod *LTable + //_, ok := L.G.builtinMts[int(LTString)] + //if !ok { + mod = L.RegisterModule(StringLibName, strFuncs).(*LTable) + gmatch := L.NewClosure(strGmatch, L.NewFunction(strGmatchIter)) + mod.RawSetString("gmatch", gmatch) + mod.RawSetString("gfind", gmatch) + mod.RawSetString("__index", mod) + L.G.builtinMts[int(LTString)] = mod + //} + L.Push(mod) + return 1 +} + +var strFuncs = map[string]LGFunction{ + "byte": strByte, + "char": strChar, + "dump": strDump, + "find": strFind, + "format": strFormat, + "gsub": strGsub, + "len": strLen, + "lower": strLower, + "match": strMatch, + "rep": strRep, + "reverse": strReverse, + "sub": strSub, + "upper": strUpper, +} + +func strByte(L *LState) int { + str := L.CheckString(1) + start := L.OptInt(2, 1) - 1 + end := L.OptInt(3, -1) + l := len(str) + if start < 0 { + start = l + start + 1 + } + if end < 0 { + end = l + end + 1 + } + + if 
L.GetTop() == 2 { + if start < 0 || start >= l { + return 0 + } + L.Push(LNumber(str[start])) + return 1 + } + + start = intMax(start, 0) + end = intMin(end, l) + if end < 0 || end <= start || start >= l { + return 0 + } + + for i := start; i < end; i++ { + L.Push(LNumber(str[i])) + } + return end - start +} + +func strChar(L *LState) int { + top := L.GetTop() + bytes := make([]byte, L.GetTop()) + for i := 1; i <= top; i++ { + bytes[i-1] = uint8(L.CheckInt(i)) + } + L.Push(LString(string(bytes))) + return 1 +} + +func strDump(L *LState) int { + L.RaiseError("GopherLua does not support the string.dump") + return 0 +} + +func strFind(L *LState) int { + str := L.CheckString(1) + pattern := L.CheckString(2) + if len(pattern) == 0 { + L.Push(LNumber(1)) + L.Push(LNumber(0)) + return 2 + } + init := luaIndex2StringIndex(str, L.OptInt(3, 1), true) + plain := false + if L.GetTop() == 4 { + plain = LVAsBool(L.Get(4)) + } + + if plain { + pos := strings.Index(str[init:], pattern) + if pos < 0 { + L.Push(LNil) + return 1 + } + L.Push(LNumber(init+pos) + 1) + L.Push(LNumber(init + pos + len(pattern))) + return 2 + } + + mds, err := pm.Find(pattern, unsafeFastStringToReadOnlyBytes(str), init, 1) + if err != nil { + L.RaiseError(err.Error()) + } + if len(mds) == 0 { + L.Push(LNil) + return 1 + } + md := mds[0] + L.Push(LNumber(md.Capture(0) + 1)) + L.Push(LNumber(md.Capture(1))) + for i := 2; i < md.CaptureLength(); i += 2 { + if md.IsPosCapture(i) { + L.Push(LNumber(md.Capture(i))) + } else { + L.Push(LString(str[md.Capture(i):md.Capture(i+1)])) + } + } + return md.CaptureLength()/2 + 1 +} + +func strFormat(L *LState) int { + str := L.CheckString(1) + args := make([]interface{}, L.GetTop()-1) + top := L.GetTop() + for i := 2; i <= top; i++ { + args[i-2] = L.Get(i) + } + npat := strings.Count(str, "%") - strings.Count(str, "%%") + L.Push(LString(fmt.Sprintf(str, args[:intMin(npat, len(args))]...))) + return 1 +} + +func strGsub(L *LState) int { + str := L.CheckString(1) + pat := 
L.CheckString(2) + L.CheckTypes(3, LTString, LTTable, LTFunction) + repl := L.CheckAny(3) + limit := L.OptInt(4, -1) + + mds, err := pm.Find(pat, unsafeFastStringToReadOnlyBytes(str), 0, limit) + if err != nil { + L.RaiseError(err.Error()) + } + if len(mds) == 0 { + L.SetTop(1) + L.Push(LNumber(0)) + return 2 + } + switch lv := repl.(type) { + case LString: + L.Push(LString(strGsubStr(L, str, string(lv), mds))) + case *LTable: + L.Push(LString(strGsubTable(L, str, lv, mds))) + case *LFunction: + L.Push(LString(strGsubFunc(L, str, lv, mds))) + } + L.Push(LNumber(len(mds))) + return 2 +} + +type replaceInfo struct { + Indicies []int + String string +} + +func checkCaptureIndex(L *LState, m *pm.MatchData, idx int) { + if idx <= 2 { + return + } + if idx >= m.CaptureLength() { + L.RaiseError("invalid capture index") + } +} + +func capturedString(L *LState, m *pm.MatchData, str string, idx int) string { + checkCaptureIndex(L, m, idx) + if idx >= m.CaptureLength() && idx == 2 { + idx = 0 + } + if m.IsPosCapture(idx) { + return fmt.Sprint(m.Capture(idx)) + } else { + return str[m.Capture(idx):m.Capture(idx+1)] + } + +} + +func strGsubDoReplace(str string, info []replaceInfo) string { + offset := 0 + buf := []byte(str) + for _, replace := range info { + oldlen := len(buf) + b1 := append([]byte(""), buf[0:offset+replace.Indicies[0]]...) + b2 := []byte("") + index2 := offset + replace.Indicies[1] + if index2 <= len(buf) { + b2 = append(b2, buf[index2:len(buf)]...) + } + buf = append(b1, replace.String...) + buf = append(buf, b2...) 
+ offset += len(buf) - oldlen + } + return string(buf) +} + +func strGsubStr(L *LState, str string, repl string, matches []*pm.MatchData) string { + infoList := make([]replaceInfo, 0, len(matches)) + for _, match := range matches { + start, end := match.Capture(0), match.Capture(1) + sc := newFlagScanner('%', "", "", repl) + for c, eos := sc.Next(); !eos; c, eos = sc.Next() { + if !sc.ChangeFlag { + if sc.HasFlag { + if c >= '0' && c <= '9' { + sc.AppendString(capturedString(L, match, str, 2*(int(c)-48))) + } else { + sc.AppendChar('%') + sc.AppendChar(c) + } + sc.HasFlag = false + } else { + sc.AppendChar(c) + } + } + } + infoList = append(infoList, replaceInfo{[]int{start, end}, sc.String()}) + } + + return strGsubDoReplace(str, infoList) +} + +func strGsubTable(L *LState, str string, repl *LTable, matches []*pm.MatchData) string { + infoList := make([]replaceInfo, 0, len(matches)) + for _, match := range matches { + idx := 0 + if match.CaptureLength() > 2 { // has captures + idx = 2 + } + var value LValue + if match.IsPosCapture(idx) { + value = L.GetTable(repl, LNumber(match.Capture(idx))) + } else { + value = L.GetField(repl, str[match.Capture(idx):match.Capture(idx+1)]) + } + if !LVIsFalse(value) { + infoList = append(infoList, replaceInfo{[]int{match.Capture(0), match.Capture(1)}, LVAsString(value)}) + } + } + return strGsubDoReplace(str, infoList) +} + +func strGsubFunc(L *LState, str string, repl *LFunction, matches []*pm.MatchData) string { + infoList := make([]replaceInfo, 0, len(matches)) + for _, match := range matches { + start, end := match.Capture(0), match.Capture(1) + L.Push(repl) + nargs := 0 + if match.CaptureLength() > 2 { // has captures + for i := 2; i < match.CaptureLength(); i += 2 { + if match.IsPosCapture(i) { + L.Push(LNumber(match.Capture(i))) + } else { + L.Push(LString(capturedString(L, match, str, i))) + } + nargs++ + } + } else { + L.Push(LString(capturedString(L, match, str, 0))) + nargs++ + } + L.Call(nargs, 1) + ret := 
L.reg.Pop() + if !LVIsFalse(ret) { + infoList = append(infoList, replaceInfo{[]int{start, end}, LVAsString(ret)}) + } + } + return strGsubDoReplace(str, infoList) +} + +type strMatchData struct { + str string + pos int + matches []*pm.MatchData +} + +func strGmatchIter(L *LState) int { + md := L.CheckUserData(1).Value.(*strMatchData) + str := md.str + matches := md.matches + idx := md.pos + md.pos += 1 + if idx == len(matches) { + return 0 + } + L.Push(L.Get(1)) + match := matches[idx] + if match.CaptureLength() == 2 { + L.Push(LString(str[match.Capture(0):match.Capture(1)])) + return 1 + } + + for i := 2; i < match.CaptureLength(); i += 2 { + if match.IsPosCapture(i) { + L.Push(LNumber(match.Capture(i))) + } else { + L.Push(LString(str[match.Capture(i):match.Capture(i+1)])) + } + } + return match.CaptureLength()/2 - 1 +} + +func strGmatch(L *LState) int { + str := L.CheckString(1) + pattern := L.CheckString(2) + mds, err := pm.Find(pattern, []byte(str), 0, -1) + if err != nil { + L.RaiseError(err.Error()) + } + L.Push(L.Get(UpvalueIndex(1))) + ud := L.NewUserData() + ud.Value = &strMatchData{str, 0, mds} + L.Push(ud) + return 2 +} + +func strLen(L *LState) int { + str := L.CheckString(1) + L.Push(LNumber(len(str))) + return 1 +} + +func strLower(L *LState) int { + str := L.CheckString(1) + L.Push(LString(strings.ToLower(str))) + return 1 +} + +func strMatch(L *LState) int { + str := L.CheckString(1) + pattern := L.CheckString(2) + offset := L.OptInt(3, 1) + l := len(str) + if offset < 0 { + offset = l + offset + 1 + } + offset-- + if offset < 0 { + offset = 0 + } + + mds, err := pm.Find(pattern, unsafeFastStringToReadOnlyBytes(str), offset, 1) + if err != nil { + L.RaiseError(err.Error()) + } + if len(mds) == 0 { + L.Push(LNil) + return 0 + } + md := mds[0] + nsubs := md.CaptureLength() / 2 + switch nsubs { + case 1: + L.Push(LString(str[md.Capture(0):md.Capture(1)])) + return 1 + default: + for i := 2; i < md.CaptureLength(); i += 2 { + if md.IsPosCapture(i) { + 
L.Push(LNumber(md.Capture(i))) + } else { + L.Push(LString(str[md.Capture(i):md.Capture(i+1)])) + } + } + return nsubs - 1 + } +} + +func strRep(L *LState) int { + str := L.CheckString(1) + n := L.CheckInt(2) + L.Push(LString(strings.Repeat(str, n))) + return 1 +} + +func strReverse(L *LState) int { + str := L.CheckString(1) + bts := []byte(str) + out := make([]byte, len(bts)) + for i, j := 0, len(bts)-1; j >= 0; i, j = i+1, j-1 { + out[i] = bts[j] + } + L.Push(LString(string(out))) + return 1 +} + +func strSub(L *LState) int { + str := L.CheckString(1) + start := luaIndex2StringIndex(str, L.CheckInt(2), true) + end := luaIndex2StringIndex(str, L.OptInt(3, -1), false) + l := len(str) + if start >= l || end < start { + L.Push(LString("")) + } else { + L.Push(LString(str[start:end])) + } + return 1 +} + +func strUpper(L *LState) int { + str := L.CheckString(1) + L.Push(LString(strings.ToUpper(str))) + return 1 +} + +func luaIndex2StringIndex(str string, i int, start bool) int { + if start && i != 0 { + i -= 1 + } + l := len(str) + if i < 0 { + i = l + i + 1 + } + i = intMax(0, i) + if !start && i > l { + i = l + } + return i +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/table.go b/vendor/github.com/yuin/gopher-lua/table.go new file mode 100644 index 00000000000..71c8b086dca --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/table.go @@ -0,0 +1,388 @@ +package lua + +const defaultArrayCap = 32 +const defaultHashCap = 32 + +type lValueArraySorter struct { + L *LState + Fn *LFunction + Values []LValue +} + +func (lv lValueArraySorter) Len() int { + return len(lv.Values) +} + +func (lv lValueArraySorter) Swap(i, j int) { + lv.Values[i], lv.Values[j] = lv.Values[j], lv.Values[i] +} + +func (lv lValueArraySorter) Less(i, j int) bool { + if lv.Fn != nil { + lv.L.Push(lv.Fn) + lv.L.Push(lv.Values[i]) + lv.L.Push(lv.Values[j]) + lv.L.Call(2, 1) + return LVAsBool(lv.L.reg.Pop()) + } + return lessThan(lv.L, lv.Values[i], lv.Values[j]) +} + +func newLTable(acap int, 
hcap int) *LTable { + if acap < 0 { + acap = 0 + } + if hcap < 0 { + hcap = 0 + } + tb := &LTable{} + tb.keys = nil + tb.k2i = nil + tb.Metatable = LNil + if acap != 0 { + tb.array = make([]LValue, 0, acap) + } + if hcap != 0 { + tb.strdict = make(map[string]LValue, hcap) + } + return tb +} + +// Len returns length of this LTable. +func (tb *LTable) Len() int { + if tb.array == nil { + return 0 + } + var prev LValue = LNil + for i := len(tb.array) - 1; i >= 0; i-- { + v := tb.array[i] + if prev == LNil && v != LNil { + return i + 1 + } + prev = v + } + return 0 +} + +// Append appends a given LValue to this LTable. +func (tb *LTable) Append(value LValue) { + if tb.array == nil { + tb.array = make([]LValue, 0, defaultArrayCap) + } + tb.array = append(tb.array, value) +} + +// Insert inserts a given LValue at position `i` in this table. +func (tb *LTable) Insert(i int, value LValue) { + if tb.array == nil { + tb.array = make([]LValue, 0, defaultArrayCap) + } + if i > len(tb.array) { + tb.RawSetInt(i, value) + return + } + if i <= 0 { + tb.RawSet(LNumber(i), value) + return + } + i -= 1 + tb.array = append(tb.array, LNil) + copy(tb.array[i+1:], tb.array[i:]) + tb.array[i] = value +} + +// MaxN returns a maximum number key that nil value does not exist before it. +func (tb *LTable) MaxN() int { + if tb.array == nil { + return 0 + } + for i := len(tb.array) - 1; i >= 0; i-- { + if tb.array[i] != LNil { + return i + 1 + } + } + return 0 +} + +// Remove removes from this table the element at a given position. 
+func (tb *LTable) Remove(pos int) LValue { + if tb.array == nil { + return LNil + } + i := pos - 1 + larray := len(tb.array) + oldval := LNil + switch { + case i >= larray: + // nothing to do + case i == larray-1 || i < 0: + oldval = tb.array[larray-1] + tb.array = tb.array[:larray-1] + default: + oldval = tb.array[i] + copy(tb.array[i:], tb.array[i+1:]) + tb.array[larray-1] = nil + tb.array = tb.array[:larray-1] + } + return oldval +} + +// RawSet sets a given LValue to a given index without the __newindex metamethod. +// It is recommended to use `RawSetString` or `RawSetInt` for performance +// if you already know the given LValue is a string or number. +func (tb *LTable) RawSet(key LValue, value LValue) { + switch v := key.(type) { + case LNumber: + if isArrayKey(v) { + if tb.array == nil { + tb.array = make([]LValue, 0, defaultArrayCap) + } + index := int(v) - 1 + alen := len(tb.array) + switch { + case index == alen: + tb.array = append(tb.array, value) + case index > alen: + for i := 0; i < (index - alen); i++ { + tb.array = append(tb.array, LNil) + } + tb.array = append(tb.array, value) + case index < alen: + tb.array[index] = value + } + return + } + case LString: + tb.RawSetString(string(v), value) + return + } + + tb.RawSetH(key, value) +} + +// RawSetInt sets a given LValue at a position `key` without the __newindex metamethod. +func (tb *LTable) RawSetInt(key int, value LValue) { + if key < 1 || key >= MaxArrayIndex { + tb.RawSetH(LNumber(key), value) + return + } + if tb.array == nil { + tb.array = make([]LValue, 0, 32) + } + index := key - 1 + alen := len(tb.array) + switch { + case index == alen: + tb.array = append(tb.array, value) + case index > alen: + for i := 0; i < (index - alen); i++ { + tb.array = append(tb.array, LNil) + } + tb.array = append(tb.array, value) + case index < alen: + tb.array[index] = value + } +} + +// RawSetString sets a given LValue to a given string index without the __newindex metamethod. 
+func (tb *LTable) RawSetString(key string, value LValue) { + if tb.strdict == nil { + tb.strdict = make(map[string]LValue, defaultHashCap) + } + if value == LNil { + delete(tb.strdict, key) + } else { + tb.strdict[key] = value + } +} + +// RawSetH sets a given LValue to a given index without the __newindex metamethod. +func (tb *LTable) RawSetH(key LValue, value LValue) { + if s, ok := key.(LString); ok { + tb.RawSetString(string(s), value) + return + } + if tb.dict == nil { + tb.dict = make(map[LValue]LValue, len(tb.strdict)) + } + + if value == LNil { + delete(tb.dict, key) + } else { + tb.dict[key] = value + } +} + +// RawGet returns an LValue associated with a given key without __index metamethod. +func (tb *LTable) RawGet(key LValue) LValue { + switch v := key.(type) { + case LNumber: + if isArrayKey(v) { + if tb.array == nil { + return LNil + } + index := int(v) - 1 + if index >= len(tb.array) { + return LNil + } + return tb.array[index] + } + case LString: + if tb.strdict == nil { + return LNil + } + if ret, ok := tb.strdict[string(v)]; ok { + return ret + } + return LNil + } + if tb.dict == nil { + return LNil + } + if v, ok := tb.dict[key]; ok { + return v + } + return LNil +} + +// RawGetInt returns an LValue at position `key` without __index metamethod. +func (tb *LTable) RawGetInt(key int) LValue { + if tb.array == nil { + return LNil + } + index := int(key) - 1 + if index >= len(tb.array) || index < 0 { + return LNil + } + return tb.array[index] +} + +// RawGet returns an LValue associated with a given key without __index metamethod. +func (tb *LTable) RawGetH(key LValue) LValue { + if s, sok := key.(LString); sok { + if tb.strdict == nil { + return LNil + } + if v, vok := tb.strdict[string(s)]; vok { + return v + } + return LNil + } + if tb.dict == nil { + return LNil + } + if v, ok := tb.dict[key]; ok { + return v + } + return LNil +} + +// RawGetString returns an LValue associated with a given key without __index metamethod. 
+func (tb *LTable) RawGetString(key string) LValue { + if tb.strdict == nil { + return LNil + } + if v, vok := tb.strdict[string(key)]; vok { + return v + } + return LNil +} + +// ForEach iterates over this table of elements, yielding each in turn to a given function. +func (tb *LTable) ForEach(cb func(LValue, LValue)) { + if tb.array != nil { + for i, v := range tb.array { + if v != LNil { + cb(LNumber(i+1), v) + } + } + } + if tb.strdict != nil { + for k, v := range tb.strdict { + if v != LNil { + cb(LString(k), v) + } + } + } + if tb.dict != nil { + for k, v := range tb.dict { + if v != LNil { + cb(k, v) + } + } + } +} + +// This function is equivalent to lua_next ( http://www.lua.org/manual/5.1/manual.html#lua_next ). +func (tb *LTable) Next(key LValue) (LValue, LValue) { + // TODO: inefficient way + init := false + if key == LNil { + tb.keys = nil + tb.k2i = nil + key = LNumber(0) + init = true + } + + length := 0 + if tb.dict != nil { + length += len(tb.dict) + } + if tb.strdict != nil { + length += len(tb.strdict) + } + + if tb.keys == nil { + tb.keys = make([]LValue, length) + tb.k2i = make(map[LValue]int) + i := 0 + if tb.dict != nil { + for k, _ := range tb.dict { + tb.keys[i] = k + tb.k2i[k] = i + i++ + } + } + if tb.strdict != nil { + for k, _ := range tb.strdict { + tb.keys[i] = LString(k) + tb.k2i[LString(k)] = i + i++ + } + } + } + + if init || key != LNumber(0) { + if kv, ok := key.(LNumber); ok && isInteger(kv) && int(kv) >= 0 { + index := int(kv) + if tb.array != nil { + for ; index < len(tb.array); index++ { + if v := tb.array[index]; v != LNil { + return LNumber(index + 1), v + } + } + } + if tb.array == nil || index == len(tb.array) { + if (tb.dict == nil || len(tb.dict) == 0) && (tb.strdict == nil || len(tb.strdict) == 0) { + tb.keys = nil + tb.k2i = nil + return LNil, LNil + } + key = tb.keys[0] + if v := tb.RawGetH(key); v != LNil { + return key, v + } + } + } + } + + for i := tb.k2i[key] + 1; i < length; i++ { + key = tb.keys[tb.k2i[key]+1] 
+ if v := tb.RawGetH(key); v != LNil { + return key, v + } + } + tb.keys = nil + tb.k2i = nil + return LNil, LNil +} diff --git a/vendor/github.com/yuin/gopher-lua/tablelib.go b/vendor/github.com/yuin/gopher-lua/tablelib.go new file mode 100644 index 00000000000..39f3ba16687 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/tablelib.go @@ -0,0 +1,96 @@ +package lua + +import ( + "sort" +) + +func OpenTable(L *LState) int { + tabmod := L.RegisterModule(TabLibName, tableFuncs) + L.Push(tabmod) + return 1 +} + +var tableFuncs = map[string]LGFunction{ + "getn": tableGetN, + "concat": tableConcat, + "insert": tableInsert, + "maxn": tableMaxN, + "remove": tableRemove, + "sort": tableSort, +} + +func tableSort(L *LState) int { + tbl := L.CheckTable(1) + sorter := lValueArraySorter{L, nil, tbl.array} + if L.GetTop() != 1 { + sorter.Fn = L.CheckFunction(2) + } + sort.Sort(sorter) + return 0 +} + +func tableGetN(L *LState) int { + L.Push(LNumber(L.CheckTable(1).Len())) + return 1 +} + +func tableMaxN(L *LState) int { + L.Push(LNumber(L.CheckTable(1).MaxN())) + return 1 +} + +func tableRemove(L *LState) int { + tbl := L.CheckTable(1) + if L.GetTop() == 1 { + L.Push(tbl.Remove(-1)) + } else { + L.Push(tbl.Remove(L.CheckInt(2))) + } + return 1 +} + +func tableConcat(L *LState) int { + tbl := L.CheckTable(1) + sep := LString(L.OptString(2, "")) + i := L.OptInt(3, 1) + j := L.OptInt(4, tbl.Len()) + if L.GetTop() == 3 { + if i > tbl.Len() || i < 1 { + L.Push(LString("")) + return 1 + } + } + i = intMax(intMin(i, tbl.Len()), 1) + j = intMin(intMin(j, tbl.Len()), tbl.Len()) + if i > j { + L.Push(LString("")) + return 1 + } + //TODO should flushing? 
+ retbottom := L.GetTop() + for ; i <= j; i++ { + L.Push(tbl.RawGetInt(i)) + if i != j { + L.Push(sep) + } + } + L.Push(stringConcat(L, L.GetTop()-retbottom, L.reg.Top()-1)) + return 1 +} + +func tableInsert(L *LState) int { + tbl := L.CheckTable(1) + nargs := L.GetTop() + if nargs == 1 { + L.RaiseError("wrong number of arguments") + } + + if L.GetTop() == 2 { + tbl.Append(L.Get(2)) + return 0 + } + tbl.Insert(int(L.CheckInt(2)), L.CheckAny(3)) + return 0 +} + +// diff --git a/vendor/github.com/yuin/gopher-lua/utils.go b/vendor/github.com/yuin/gopher-lua/utils.go new file mode 100644 index 00000000000..cb71bf53e87 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/utils.go @@ -0,0 +1,262 @@ +package lua + +import ( + "bufio" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +func intMin(a, b int) int { + if a < b { + return a + } else { + return b + } +} + +func intMax(a, b int) int { + if a > b { + return a + } else { + return b + } +} + +func defaultFormat(v interface{}, f fmt.State, c rune) { + buf := make([]string, 0, 10) + buf = append(buf, "%") + for i := 0; i < 128; i++ { + if f.Flag(i) { + buf = append(buf, string(i)) + } + } + + if w, ok := f.Width(); ok { + buf = append(buf, strconv.Itoa(w)) + } + if p, ok := f.Precision(); ok { + buf = append(buf, "."+strconv.Itoa(p)) + } + buf = append(buf, string(c)) + format := strings.Join(buf, "") + fmt.Fprintf(f, format, v) +} + +type flagScanner struct { + flag byte + start string + end string + buf []byte + str string + Length int + Pos int + HasFlag bool + ChangeFlag bool +} + +func newFlagScanner(flag byte, start, end, str string) *flagScanner { + return &flagScanner{flag, start, end, make([]byte, 0, len(str)), str, len(str), 0, false, false} +} + +func (fs *flagScanner) AppendString(str string) { fs.buf = append(fs.buf, str...) 
} + +func (fs *flagScanner) AppendChar(ch byte) { fs.buf = append(fs.buf, ch) } + +func (fs *flagScanner) String() string { return string(fs.buf) } + +func (fs *flagScanner) Next() (byte, bool) { + c := byte('\000') + fs.ChangeFlag = false + if fs.Pos == fs.Length { + if fs.HasFlag { + fs.AppendString(fs.end) + } + return c, true + } else { + c = fs.str[fs.Pos] + if c == fs.flag { + if fs.Pos < (fs.Length-1) && fs.str[fs.Pos+1] == fs.flag { + fs.HasFlag = false + fs.AppendChar(fs.flag) + fs.Pos += 2 + return fs.Next() + } else if fs.Pos != fs.Length-1 { + if fs.HasFlag { + fs.AppendString(fs.end) + } + fs.AppendString(fs.start) + fs.ChangeFlag = true + fs.HasFlag = true + } + } + } + fs.Pos++ + return c, false +} + +var cDateFlagToGo = map[byte]string{ + 'a': "mon", 'A': "Monday", 'b': "Jan", 'B': "January", 'c': "02 Jan 06 15:04 MST", 'd': "02", + 'F': "2006-01-02", 'H': "15", 'I': "03", 'm': "01", 'M': "04", 'p': "PM", 'P': "pm", 'S': "05", + 'x': "15/04/05", 'X': "15:04:05", 'y': "06", 'Y': "2006", 'z': "-0700", 'Z': "MST"} + +func strftime(t time.Time, cfmt string) string { + sc := newFlagScanner('%', "", "", cfmt) + for c, eos := sc.Next(); !eos; c, eos = sc.Next() { + if !sc.ChangeFlag { + if sc.HasFlag { + if v, ok := cDateFlagToGo[c]; ok { + sc.AppendString(t.Format(v)) + } else { + switch c { + case 'w': + sc.AppendString(fmt.Sprint(int(t.Weekday()))) + default: + sc.AppendChar('%') + sc.AppendChar(c) + } + } + sc.HasFlag = false + } else { + sc.AppendChar(c) + } + } + } + + return sc.String() +} + +func isInteger(v LNumber) bool { + return float64(v) == float64(int64(v)) + //_, frac := math.Modf(float64(v)) + //return frac == 0.0 +} + +func isArrayKey(v LNumber) bool { + return isInteger(v) && v < LNumber(int((^uint(0))>>1)) && v > LNumber(0) && v < LNumber(MaxArrayIndex) +} + +func parseNumber(number string) (LNumber, error) { + var value LNumber + number = strings.Trim(number, " \t\n") + if v, err := strconv.ParseInt(number, 0, LNumberBit); err != nil { 
+ if v2, err2 := strconv.ParseFloat(number, LNumberBit); err2 != nil { + return LNumber(0), err2 + } else { + value = LNumber(v2) + } + } else { + value = LNumber(v) + } + return value, nil +} + +func popenArgs(arg string) (string, []string) { + cmd := "/bin/sh" + args := []string{"-c"} + if LuaOS == "windows" { + cmd = "C:\\Windows\\system32\\cmd.exe" + args = []string{"/c"} + } + args = append(args, arg) + return cmd, args +} + +func isGoroutineSafe(lv LValue) bool { + switch v := lv.(type) { + case *LFunction, *LUserData, *LState: + return false + case *LTable: + return v.Metatable == LNil + default: + return true + } +} + +func readBufioSize(reader *bufio.Reader, size int64) ([]byte, error, bool) { + result := []byte{} + read := int64(0) + var err error + var n int + for read != size { + buf := make([]byte, size-read) + n, err = reader.Read(buf) + if err != nil { + break + } + read += int64(n) + result = append(result, buf[:n]...) + } + e := err + if e != nil && e == io.EOF { + e = nil + } + + return result, e, len(result) == 0 && err == io.EOF +} + +func readBufioLine(reader *bufio.Reader) ([]byte, error, bool) { + result := []byte{} + var buf []byte + var err error + var isprefix bool = true + for isprefix { + buf, isprefix, err = reader.ReadLine() + if err != nil { + break + } + result = append(result, buf...) 
+ } + e := err + if e != nil && e == io.EOF { + e = nil + } + + return result, e, len(result) == 0 && err == io.EOF +} + +func int2Fb(val int) int { + e := 0 + x := val + for x >= 16 { + x = (x + 1) >> 1 + e++ + } + if x < 8 { + return x + } + return ((e + 1) << 3) | (x - 8) +} + +func strCmp(s1, s2 string) int { + len1 := len(s1) + len2 := len(s2) + for i := 0; ; i++ { + c1 := -1 + if i < len1 { + c1 = int(s1[i]) + } + c2 := -1 + if i != len2 { + c2 = int(s2[i]) + } + switch { + case c1 < c2: + return -1 + case c1 > c2: + return +1 + case c1 < 0: + return 0 + } + } +} + +func unsafeFastStringToReadOnlyBytes(s string) []byte { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{sh.Data, sh.Len, sh.Len} + return *(*[]byte)(unsafe.Pointer(&bh)) +} diff --git a/vendor/github.com/yuin/gopher-lua/value.go b/vendor/github.com/yuin/gopher-lua/value.go new file mode 100644 index 00000000000..1cc31df1deb --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/value.go @@ -0,0 +1,247 @@ +package lua + +import ( + "fmt" + "golang.org/x/net/context" + "os" +) + +type LValueType int + +const ( + LTNil LValueType = iota + LTBool + LTNumber + LTString + LTFunction + LTUserData + LTThread + LTTable + LTChannel +) + +var lValueNames = [9]string{"nil", "boolean", "number", "string", "function", "userdata", "thread", "table", "channel"} + +func (vt LValueType) String() string { + return lValueNames[int(vt)] +} + +type LValue interface { + String() string + Type() LValueType + // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). + assertFloat64() (float64, bool) + // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). + assertString() (string, bool) + // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). 
+ assertFunction() (*LFunction, bool) +} + +// LVIsFalse returns true if a given LValue is a nil or false otherwise false. +func LVIsFalse(v LValue) bool { return v == LNil || v == LFalse } + +// LVIsFalse returns false if a given LValue is a nil or false otherwise true. +func LVAsBool(v LValue) bool { return v != LNil && v != LFalse } + +// LVAsString returns string representation of a given LValue +// if the LValue is a string or number, otherwise an empty string. +func LVAsString(v LValue) string { + switch sn := v.(type) { + case LString, LNumber: + return sn.String() + default: + return "" + } +} + +// LVCanConvToString returns true if a given LValue is a string or number +// otherwise false. +func LVCanConvToString(v LValue) bool { + switch v.(type) { + case LString, LNumber: + return true + default: + return false + } +} + +// LVAsNumber tries to convert a given LValue to a number. +func LVAsNumber(v LValue) LNumber { + switch lv := v.(type) { + case LNumber: + return lv + case LString: + if num, err := parseNumber(string(lv)); err == nil { + return num + } + } + return LNumber(0) +} + +type LNilType struct{} + +func (nl *LNilType) String() string { return "nil" } +func (nl *LNilType) Type() LValueType { return LTNil } +func (nl *LNilType) assertFloat64() (float64, bool) { return 0, false } +func (nl *LNilType) assertString() (string, bool) { return "", false } +func (nl *LNilType) assertFunction() (*LFunction, bool) { return nil, false } + +var LNil = LValue(&LNilType{}) + +type LBool bool + +func (bl LBool) String() string { + if bool(bl) { + return "true" + } + return "false" +} +func (bl LBool) Type() LValueType { return LTBool } +func (bl LBool) assertFloat64() (float64, bool) { return 0, false } +func (bl LBool) assertString() (string, bool) { return "", false } +func (bl LBool) assertFunction() (*LFunction, bool) { return nil, false } + +var LTrue = LBool(true) +var LFalse = LBool(false) + +type LString string + +func (st LString) String() string { 
return string(st) } +func (st LString) Type() LValueType { return LTString } +func (st LString) assertFloat64() (float64, bool) { return 0, false } +func (st LString) assertString() (string, bool) { return string(st), true } +func (st LString) assertFunction() (*LFunction, bool) { return nil, false } + +// fmt.Formatter interface +func (st LString) Format(f fmt.State, c rune) { + switch c { + case 'd', 'i': + if nm, err := parseNumber(string(st)); err != nil { + defaultFormat(nm, f, 'd') + } else { + defaultFormat(string(st), f, 's') + } + default: + defaultFormat(string(st), f, c) + } +} + +func (nm LNumber) String() string { + if isInteger(nm) { + return fmt.Sprint(int64(nm)) + } + return fmt.Sprint(float64(nm)) +} + +func (nm LNumber) Type() LValueType { return LTNumber } +func (nm LNumber) assertFloat64() (float64, bool) { return float64(nm), true } +func (nm LNumber) assertString() (string, bool) { return "", false } +func (nm LNumber) assertFunction() (*LFunction, bool) { return nil, false } + +// fmt.Formatter interface +func (nm LNumber) Format(f fmt.State, c rune) { + switch c { + case 'q', 's': + defaultFormat(nm.String(), f, c) + case 'b', 'c', 'd', 'o', 'x', 'X', 'U': + defaultFormat(int64(nm), f, c) + case 'e', 'E', 'f', 'F', 'g', 'G': + defaultFormat(float64(nm), f, c) + case 'i': + defaultFormat(int64(nm), f, 'd') + default: + if isInteger(nm) { + defaultFormat(int64(nm), f, c) + } else { + defaultFormat(float64(nm), f, c) + } + } +} + +type LTable struct { + Metatable LValue + + array []LValue + dict map[LValue]LValue + strdict map[string]LValue + keys []LValue + k2i map[LValue]int +} + +func (tb *LTable) String() string { return fmt.Sprintf("table: %p", tb) } +func (tb *LTable) Type() LValueType { return LTTable } +func (tb *LTable) assertFloat64() (float64, bool) { return 0, false } +func (tb *LTable) assertString() (string, bool) { return "", false } +func (tb *LTable) assertFunction() (*LFunction, bool) { return nil, false } + +type LFunction 
struct { + IsG bool + Env *LTable + Proto *FunctionProto + GFunction LGFunction + Upvalues []*Upvalue +} +type LGFunction func(*LState) int + +func (fn *LFunction) String() string { return fmt.Sprintf("function: %p", fn) } +func (fn *LFunction) Type() LValueType { return LTFunction } +func (fn *LFunction) assertFloat64() (float64, bool) { return 0, false } +func (fn *LFunction) assertString() (string, bool) { return "", false } +func (fn *LFunction) assertFunction() (*LFunction, bool) { return fn, true } + +type Global struct { + MainThread *LState + CurrentThread *LState + Registry *LTable + Global *LTable + + builtinMts map[int]LValue + tempFiles []*os.File + gccount int32 +} + +type LState struct { + G *Global + Parent *LState + Env *LTable + Panic func(*LState) + Dead bool + Options Options + + stop int32 + reg *registry + stack *callFrameStack + alloc *allocator + currentFrame *callFrame + wrapped bool + uvcache *Upvalue + hasErrorFunc bool + mainLoop func(*LState, *callFrame) + ctx context.Context +} + +func (ls *LState) String() string { return fmt.Sprintf("thread: %p", ls) } +func (ls *LState) Type() LValueType { return LTThread } +func (ls *LState) assertFloat64() (float64, bool) { return 0, false } +func (ls *LState) assertString() (string, bool) { return "", false } +func (ls *LState) assertFunction() (*LFunction, bool) { return nil, false } + +type LUserData struct { + Value interface{} + Env *LTable + Metatable LValue +} + +func (ud *LUserData) String() string { return fmt.Sprintf("userdata: %p", ud) } +func (ud *LUserData) Type() LValueType { return LTUserData } +func (ud *LUserData) assertFloat64() (float64, bool) { return 0, false } +func (ud *LUserData) assertString() (string, bool) { return "", false } +func (ud *LUserData) assertFunction() (*LFunction, bool) { return nil, false } + +type LChannel chan LValue + +func (ch LChannel) String() string { return fmt.Sprintf("channel: %p", ch) } +func (ch LChannel) Type() LValueType { return LTChannel } 
+func (ch LChannel) assertFloat64() (float64, bool) { return 0, false } +func (ch LChannel) assertString() (string, bool) { return "", false } +func (ch LChannel) assertFunction() (*LFunction, bool) { return nil, false } diff --git a/vendor/github.com/yuin/gopher-lua/vm.go b/vendor/github.com/yuin/gopher-lua/vm.go new file mode 100644 index 00000000000..3e2f15cc0e8 --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/vm.go @@ -0,0 +1,1390 @@ +package lua + +//////////////////////////////////////////////////////// +// This file was generated by go-inline. DO NOT EDIT. // +//////////////////////////////////////////////////////// + +import ( + "fmt" + "math" + "strings" +) + +func mainLoop(L *LState, baseframe *callFrame) { + var inst uint32 + var cf *callFrame + + if L.stack.IsEmpty() { + return + } + + L.currentFrame = L.stack.Last() + if L.currentFrame.Fn.IsG { + callGFunction(L, false) + return + } + + for { + cf = L.currentFrame + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 { + return + } + } +} + +func mainLoopWithContext(L *LState, baseframe *callFrame) { + var inst uint32 + var cf *callFrame + + if L.stack.IsEmpty() { + return + } + + L.currentFrame = L.stack.Last() + if L.currentFrame.Fn.IsG { + callGFunction(L, false) + return + } + + for { + cf = L.currentFrame + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + select { + case <-L.ctx.Done(): + L.RaiseError(L.ctx.Err().Error()) + return + default: + if jumpTable[int(inst>>26)](L, inst, baseframe) == 1 { + return + } + } + } +} + +func copyReturnValues(L *LState, regv, start, n, b int) { // +inline-start + if b == 1 { + // this section is inlined by go-inline + // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go' + { + rg := L.reg + regm := regv + for i := 0; i < n; i++ { + rg.array[regm+i] = LNil + } + rg.top = regm + n + } + } else { + // this section is inlined by go-inline + // source function is 'func (rg *registry) 
CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := L.reg + limit := -1 + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + } +} // +inline-end + +func switchToParentThread(L *LState, nargs int, haserror bool, kill bool) { + parent := L.Parent + if parent == nil { + L.RaiseError("can not yield from outside of a coroutine") + } + L.G.CurrentThread = parent + L.Parent = nil + if !L.wrapped { + if haserror { + parent.Push(LFalse) + } else { + parent.Push(LTrue) + } + } + L.XMoveTo(parent, nargs) + L.stack.Pop() + offset := L.currentFrame.LocalBase - L.currentFrame.ReturnBase + L.currentFrame = L.stack.Last() + L.reg.SetTop(L.reg.Top() - offset) // remove 'yield' function(including tailcalled functions) + if kill { + L.kill() + } +} + +func callGFunction(L *LState, tailcall bool) bool { + frame := L.currentFrame + gfnret := frame.Fn.GFunction(L) + if tailcall { + L.stack.Remove(L.stack.Sp() - 2) // remove caller lua function frame + L.currentFrame = L.stack.Last() + } + + if gfnret < 0 { + switchToParentThread(L, L.GetTop(), false, false) + return true + } + + wantret := frame.NRet + if wantret == MultRet { + wantret = gfnret + } + + if tailcall && L.Parent != nil && L.stack.Sp() == 1 { + switchToParentThread(L, wantret, false, true) + return true + } + + // this section is inlined by go-inline + // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := L.reg + regv := frame.ReturnBase + start := L.reg.Top() - gfnret + limit := -1 + n := wantret + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + L.stack.Pop() + L.currentFrame = L.stack.Last() + return false +} + +func 
threadRun(L *LState) { + if L.stack.IsEmpty() { + return + } + + defer func() { + if rcv := recover(); rcv != nil { + var lv LValue + if v, ok := rcv.(*ApiError); ok { + lv = v.Object + } else { + lv = LString(fmt.Sprint(rcv)) + } + if parent := L.Parent; parent != nil { + if L.wrapped { + L.Push(lv) + parent.Panic(L) + } else { + L.SetTop(0) + L.Push(lv) + switchToParentThread(L, 1, true, true) + } + } else { + panic(rcv) + } + } + }() + L.mainLoop(L, nil) +} + +type instFunc func(*LState, uint32, *callFrame) int + +var jumpTable [opCodeMax + 1]instFunc + +func init() { + jumpTable = [opCodeMax + 1]instFunc{ + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + reg.Set(RA, reg.Get(lbase+B)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(lbase+A, reg.Get(lbase+B)) + code := cf.Fn.Proto.Code + pc := cf.Pc + for i := 0; i < C; i++ { + inst = code[pc] + pc++ + A = int(inst>>18) & 0xff //GETA + B = int(inst & 0x1ff) //GETB + reg.Set(lbase+A, reg.Get(lbase+B)) + } + cf.Pc = pc + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADK + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + reg.Set(RA, cf.Fn.Proto.Constants[Bx]) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if B != 0 { + reg.Set(RA, LTrue) + } else { + reg.Set(RA, LFalse) + } + if C != 0 { 
+ cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADNIL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + for i := RA; i <= lbase+B; i++ { + reg.Set(i, LNil) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETUPVAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + reg.Set(RA, cf.Fn.Upvalues[B].Value()) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx])) + reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx])) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C))) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C))) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + //L.setField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx], reg.Get(RA)) + L.setFieldString(cf.Fn.Env, 
cf.Fn.Proto.stringConstants[Bx], reg.Get(RA)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETUPVAL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + cf.Fn.Upvalues[B].SetValue(reg.Get(RA)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + L.setField(reg.Get(RA), L.rkValue(B), L.rkValue(C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETTABLEKS + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + L.setFieldString(reg.Get(RA), L.rkString(B), L.rkValue(C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NEWTABLE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + reg.Set(RA, newLTable(B, C)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + selfobj := reg.Get(lbase + B) + reg.Set(RA, L.getFieldString(selfobj, L.rkString(C))) + reg.Set(RA+1, selfobj) + return 0 + }, + opArith, // OP_ADD + opArith, // OP_SUB + opArith, // OP_MUL + opArith, // OP_DIV + opArith, // OP_MOD + opArith, // OP_POW + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_UNM + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := 
int(inst & 0x1ff) //GETB + unaryv := L.rkValue(B) + if nm, ok := unaryv.(LNumber); ok { + reg.SetNumber(RA, -nm) + } else { + op := L.metaOp1(unaryv, "__unm") + if op.Type() == LTFunction { + reg.Push(op) + reg.Push(unaryv) + L.Call(1, 1) + reg.Set(RA, reg.Pop()) + } else if str, ok1 := unaryv.(LString); ok1 { + if num, err := parseNumber(string(str)); err == nil { + reg.Set(RA, -num) + } else { + L.RaiseError("__unm undefined") + } + } else { + L.RaiseError("__unm undefined") + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOT + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + if LVIsFalse(reg.Get(lbase + B)) { + reg.Set(RA, LTrue) + } else { + reg.Set(RA, LFalse) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LEN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + switch lv := L.rkValue(B).(type) { + case LString: + reg.SetNumber(RA, LNumber(len(lv))) + default: + op := L.metaOp1(lv, "__len") + if op.Type() == LTFunction { + reg.Push(op) + reg.Push(lv) + L.Call(1, 1) + reg.Set(RA, reg.Pop()) + } else if lv.Type() == LTTable { + reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) + } else { + L.RaiseError("__len undefined") + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CONCAT + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + RC := lbase + C + RB := lbase + B + reg.Set(RA, stringConcat(L, RC-RB+1, RC)) + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP + cf := L.currentFrame + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + cf.Pc += Sbx + return 0 + }, + func(L *LState, inst uint32, baseframe 
*callFrame) int { //OP_EQ + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + ret := equals(L, L.rkValue(B), L.rkValue(C), false) + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LT + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + ret := lessThan(L, L.rkValue(B), L.rkValue(C)) + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LE + cf := L.currentFrame + A := int(inst>>18) & 0xff //GETA + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + lhs := L.rkValue(B) + rhs := L.rkValue(C) + ret := false + + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + ret = v1 <= v2 + } else { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + } + } else { + if lhs.Type() != rhs.Type() { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + } + switch lhs.Type() { + case LTString: + ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) <= 0 + default: + switch objectRational(L, lhs, rhs, "__le") { + case 1: + ret = true + case 0: + ret = false + default: + ret = !objectRationalWithError(L, rhs, lhs, "__lt") + } + } + } + + v := 1 + if ret { + v = 0 + } + if v == A { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TEST + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + C := int(inst>>9) & 0x1ff //GETC + if LVAsBool(reg.Get(RA)) == (C == 0) { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TESTSET + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := 
int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) { + reg.Set(RA, value) + } else { + cf.Pc++ + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CALL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + nargs := B - 1 + if B == 0 { + nargs = reg.Top() - (RA + 1) + } + lv := reg.Get(RA) + nret := C - 1 + var callable *LFunction + var meta bool + if fn, ok := lv.assertFunction(); ok { + callable = fn + meta = false + } else { + callable, meta = L.metaCall(lv) + } + // this section is inlined by go-inline + // source function is 'func (ls *LState) pushCallFrame(cf callFrame, fn LValue, meta bool) ' in '_state.go' + { + ls := L + cf := callFrame{Fn: callable, Pc: 0, Base: RA, LocalBase: RA + 1, ReturnBase: RA, NArgs: nargs, NRet: nret, Parent: cf, TailCall: 0} + fn := lv + if meta { + cf.NArgs++ + ls.reg.Insert(fn, cf.LocalBase) + } + if cf.Fn == nil { + ls.RaiseError("attempt to call a non-function object") + } + if ls.stack.sp == ls.Options.CallStackSize { + ls.RaiseError("stack overflow") + } + // this section is inlined by go-inline + // source function is 'func (cs *callFrameStack) Push(v callFrame) ' in '_state.go' + { + cs := ls.stack + v := cf + cs.array[cs.sp] = v + cs.array[cs.sp].Idx = cs.sp + cs.sp++ + } + newcf := ls.stack.Last() + // this section is inlined by go-inline + // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go' + { + cf := newcf + if cf.Fn.IsG { + ls.reg.SetTop(cf.LocalBase + cf.NArgs) + } else { + proto := cf.Fn.Proto + nargs := cf.NArgs + np := int(proto.NumParameters) + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + nargs = np + } + + if (proto.IsVarArg & VarArgIsVarArg) == 0 { + if nargs < 
int(proto.NumUsedRegisters) { + nargs = int(proto.NumUsedRegisters) + } + for i := np; i < nargs; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } + ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters) + } else { + /* swap vararg positions: + closure + namedparam1 <- lbase + namedparam2 + vararg1 + vararg2 + + TO + + closure + nil + nil + vararg1 + vararg2 + namedparam1 <- lbase + namedparam2 + */ + nvarargs := nargs - np + if nvarargs < 0 { + nvarargs = 0 + } + + ls.reg.SetTop(cf.LocalBase + nargs + np) + for i := 0; i < np; i++ { + //ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i)) + ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i] + //ls.reg.Set(cf.LocalBase+i, LNil) + ls.reg.array[cf.LocalBase+i] = LNil + } + + if CompatVarArg { + ls.reg.SetTop(cf.LocalBase + nargs + np + 1) + if (proto.IsVarArg & VarArgNeedsArg) != 0 { + argtb := newLTable(nvarargs, 0) + for i := 0; i < nvarargs; i++ { + argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i)) + } + argtb.RawSetString("n", LNumber(nvarargs)) + //ls.reg.Set(cf.LocalBase+nargs+np, argtb) + ls.reg.array[cf.LocalBase+nargs+np] = argtb + } else { + ls.reg.array[cf.LocalBase+nargs+np] = LNil + } + } + cf.LocalBase += nargs + maxreg := cf.LocalBase + int(proto.NumUsedRegisters) + ls.reg.SetTop(maxreg) + } + } + } + ls.currentFrame = newcf + } + if callable.IsG && callGFunction(L, false) { + return 1 + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TAILCALL + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + nargs := B - 1 + if B == 0 { + nargs = reg.Top() - (RA + 1) + } + lv := reg.Get(RA) + var callable *LFunction + var meta bool + if fn, ok := lv.assertFunction(); ok { + callable = fn + meta = false + } else { + callable, meta = L.metaCall(lv) + } + if callable == nil { + L.RaiseError("attempt to call a non-function object") + } + // this section is inlined by 
go-inline + // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go' + { + ls := L + idx := lbase + if ls.uvcache != nil { + var prev *Upvalue + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index >= idx { + if prev != nil { + prev.next = nil + } else { + ls.uvcache = nil + } + uv.Close() + } + prev = uv + } + } + } + if callable.IsG { + luaframe := cf + L.pushCallFrame(callFrame{ + Fn: callable, + Pc: 0, + Base: RA, + LocalBase: RA + 1, + ReturnBase: cf.ReturnBase, + NArgs: nargs, + NRet: cf.NRet, + Parent: cf, + TailCall: 0, + }, lv, meta) + if callGFunction(L, true) { + return 1 + } + if L.currentFrame == nil || L.currentFrame.Fn.IsG || luaframe == baseframe { + return 1 + } + } else { + base := cf.Base + cf.Fn = callable + cf.Pc = 0 + cf.Base = RA + cf.LocalBase = RA + 1 + cf.ReturnBase = cf.ReturnBase + cf.NArgs = nargs + cf.NRet = cf.NRet + cf.TailCall++ + lbase := cf.LocalBase + if meta { + cf.NArgs++ + L.reg.Insert(lv, cf.LocalBase) + } + // this section is inlined by go-inline + // source function is 'func (ls *LState) initCallFrame(cf *callFrame) ' in '_state.go' + { + ls := L + if cf.Fn.IsG { + ls.reg.SetTop(cf.LocalBase + cf.NArgs) + } else { + proto := cf.Fn.Proto + nargs := cf.NArgs + np := int(proto.NumParameters) + for i := nargs; i < np; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + nargs = np + } + + if (proto.IsVarArg & VarArgIsVarArg) == 0 { + if nargs < int(proto.NumUsedRegisters) { + nargs = int(proto.NumUsedRegisters) + } + for i := np; i < nargs; i++ { + ls.reg.array[cf.LocalBase+i] = LNil + } + ls.reg.top = cf.LocalBase + int(proto.NumUsedRegisters) + } else { + /* swap vararg positions: + closure + namedparam1 <- lbase + namedparam2 + vararg1 + vararg2 + + TO + + closure + nil + nil + vararg1 + vararg2 + namedparam1 <- lbase + namedparam2 + */ + nvarargs := nargs - np + if nvarargs < 0 { + nvarargs = 0 + } + + ls.reg.SetTop(cf.LocalBase + nargs + np) + for i := 0; i < np; i++ { + 
//ls.reg.Set(cf.LocalBase+nargs+i, ls.reg.Get(cf.LocalBase+i)) + ls.reg.array[cf.LocalBase+nargs+i] = ls.reg.array[cf.LocalBase+i] + //ls.reg.Set(cf.LocalBase+i, LNil) + ls.reg.array[cf.LocalBase+i] = LNil + } + + if CompatVarArg { + ls.reg.SetTop(cf.LocalBase + nargs + np + 1) + if (proto.IsVarArg & VarArgNeedsArg) != 0 { + argtb := newLTable(nvarargs, 0) + for i := 0; i < nvarargs; i++ { + argtb.RawSetInt(i+1, ls.reg.Get(cf.LocalBase+np+i)) + } + argtb.RawSetString("n", LNumber(nvarargs)) + //ls.reg.Set(cf.LocalBase+nargs+np, argtb) + ls.reg.array[cf.LocalBase+nargs+np] = argtb + } else { + ls.reg.array[cf.LocalBase+nargs+np] = LNil + } + } + cf.LocalBase += nargs + maxreg := cf.LocalBase + int(proto.NumUsedRegisters) + ls.reg.SetTop(maxreg) + } + } + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := L.reg + regv := base + start := RA + limit := -1 + n := reg.Top() - RA - 1 + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + cf.Base = base + cf.LocalBase = base + (cf.LocalBase - lbase + 1) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_RETURN + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + // this section is inlined by go-inline + // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go' + { + ls := L + idx := lbase + if ls.uvcache != nil { + var prev *Upvalue + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index >= idx { + if prev != nil { + prev.next = nil + } else { + ls.uvcache = nil + } + uv.Close() + } + prev = uv + } + } + } + nret := B - 1 + if B == 0 { + nret = reg.Top() - RA + } + n := cf.NRet + if cf.NRet == MultRet { 
+ n = nret + } + + if L.Parent != nil && L.stack.Sp() == 1 { + // this section is inlined by go-inline + // source function is 'func copyReturnValues(L *LState, regv, start, n, b int) ' in '_vm.go' + { + regv := reg.Top() + start := RA + b := B + if b == 1 { + // this section is inlined by go-inline + // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go' + { + rg := L.reg + regm := regv + for i := 0; i < n; i++ { + rg.array[regm+i] = LNil + } + rg.top = regm + n + } + } else { + // this section is inlined by go-inline + // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := L.reg + limit := -1 + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + } + } + switchToParentThread(L, n, false, true) + return 1 + } + islast := baseframe == L.stack.Pop() || L.stack.IsEmpty() + // this section is inlined by go-inline + // source function is 'func copyReturnValues(L *LState, regv, start, n, b int) ' in '_vm.go' + { + regv := cf.ReturnBase + start := RA + b := B + if b == 1 { + // this section is inlined by go-inline + // source function is 'func (rg *registry) FillNil(regm, n int) ' in '_state.go' + { + rg := L.reg + regm := regv + for i := 0; i < n; i++ { + rg.array[regm+i] = LNil + } + rg.top = regm + n + } + } else { + // this section is inlined by go-inline + // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := L.reg + limit := -1 + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + } + } + L.currentFrame = L.stack.Last() + if islast || L.currentFrame == nil || L.currentFrame.Fn.IsG { + return 1 + } + return 0 + 
}, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORLOOP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { + if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 { + if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 { + init += step + reg.SetNumber(RA, LNumber(init)) + if (step > 0 && init <= limit) || (step <= 0 && init >= limit) { + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + cf.Pc += Sbx + reg.SetNumber(RA+3, LNumber(init)) + } else { + reg.SetTop(RA + 1) + } + } else { + L.RaiseError("for statement step must be a number") + } + } else { + L.RaiseError("for statement limit must be a number") + } + } else { + L.RaiseError("for statement init must be a number") + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_FORPREP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX + if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { + if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 { + reg.SetNumber(RA, LNumber(init-step)) + } else { + L.RaiseError("for statement step must be a number") + } + } else { + L.RaiseError("for statement init must be a number") + } + cf.Pc += Sbx + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_TFORLOOP + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + C := int(inst>>9) & 0x1ff //GETC + nret := C + reg.SetTop(RA + 3 + 2) + reg.Set(RA+3+2, reg.Get(RA+2)) + reg.Set(RA+3+1, reg.Get(RA+1)) + reg.Set(RA+3, reg.Get(RA)) + L.callR(2, nret, RA+3) + if value := reg.Get(RA + 3); value != LNil { + reg.Set(RA+2, value) + pc := cf.Fn.Proto.Code[cf.Pc] + cf.Pc += int(pc&0x3ffff) - opMaxArgSbx + } + cf.Pc++ + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) 
int { //OP_SETLIST + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + if C == 0 { + C = int(cf.Fn.Proto.Code[cf.Pc]) + cf.Pc++ + } + offset := (C - 1) * FieldsPerFlush + table := reg.Get(RA).(*LTable) + nelem := B + if B == 0 { + nelem = reg.Top() - RA - 1 + } + for i := 1; i <= nelem; i++ { + table.RawSetInt(offset+i, reg.Get(RA+i)) + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSE + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + // this section is inlined by go-inline + // source function is 'func (ls *LState) closeUpvalues(idx int) ' in '_state.go' + { + ls := L + idx := RA + if ls.uvcache != nil { + var prev *Upvalue + for uv := ls.uvcache; uv != nil; uv = uv.next { + if uv.index >= idx { + if prev != nil { + prev.next = nil + } else { + ls.uvcache = nil + } + uv.Close() + } + prev = uv + } + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_CLOSURE + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + Bx := int(inst & 0x3ffff) //GETBX + proto := cf.Fn.Proto.FunctionPrototypes[Bx] + closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues)) + reg.Set(RA, closure) + for i := 0; i < int(proto.NumUpvalues); i++ { + inst = cf.Fn.Proto.Code[cf.Pc] + cf.Pc++ + B := opGetArgB(inst) + switch opGetOpCode(inst) { + case OP_MOVE: + closure.Upvalues[i] = L.findUpvalue(lbase + B) + case OP_GETUPVAL: + closure.Upvalues[i] = cf.Fn.Upvalues[B] + } + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_VARARG + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + B := int(inst & 0x1ff) //GETB + nparams := int(cf.Fn.Proto.NumParameters) + nvarargs := cf.NArgs - 
nparams + if nvarargs < 0 { + nvarargs = 0 + } + nwant := B - 1 + if B == 0 { + nwant = nvarargs + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) CopyRange(regv, start, limit, n int) ' in '_state.go' + { + rg := reg + regv := RA + start := cf.Base + nparams + 1 + limit := cf.LocalBase + n := nwant + for i := 0; i < n; i++ { + if tidx := start + i; tidx >= rg.top || limit > -1 && tidx >= limit || tidx < 0 { + rg.array[regv+i] = LNil + } else { + rg.array[regv+i] = rg.array[tidx] + } + } + rg.top = regv + n + } + return 0 + }, + func(L *LState, inst uint32, baseframe *callFrame) int { //OP_NOP + return 0 + }, + } +} + +func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_POW + reg := L.reg + cf := L.currentFrame + lbase := cf.LocalBase + A := int(inst>>18) & 0xff //GETA + RA := lbase + A + opcode := int(inst >> 26) //GETOPCODE + B := int(inst & 0x1ff) //GETB + C := int(inst>>9) & 0x1ff //GETC + lhs := L.rkValue(B) + rhs := L.rkValue(C) + v1, ok1 := lhs.assertFloat64() + v2, ok2 := rhs.assertFloat64() + if ok1 && ok2 { + reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2))) + } else { + reg.Set(RA, objectArith(L, opcode, lhs, rhs)) + } + return 0 +} + +func luaModulo(lhs, rhs LNumber) LNumber { + flhs := float64(lhs) + frhs := float64(rhs) + v := math.Mod(flhs, frhs) + if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) { + v += frhs + } + return LNumber(v) +} + +func numberArith(L *LState, opcode int, lhs, rhs LNumber) LNumber { + switch opcode { + case OP_ADD: + return lhs + rhs + case OP_SUB: + return lhs - rhs + case OP_MUL: + return lhs * rhs + case OP_DIV: + return lhs / rhs + case OP_MOD: + return luaModulo(lhs, rhs) + case OP_POW: + flhs := float64(lhs) + frhs := float64(rhs) + return LNumber(math.Pow(flhs, frhs)) + } + panic("should not reach here") + return LNumber(0) +} + +func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { + event 
:= "" + switch opcode { + case OP_ADD: + event = "__add" + case OP_SUB: + event = "__sub" + case OP_MUL: + event = "__mul" + case OP_DIV: + event = "__div" + case OP_MOD: + event = "__mod" + case OP_POW: + event = "__pow" + } + op := L.metaOp2(lhs, rhs, event) + if op.Type() == LTFunction { + L.reg.Push(op) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + return L.reg.Pop() + } + if str, ok := lhs.(LString); ok { + if lnum, err := parseNumber(string(str)); err == nil { + lhs = lnum + } + } + if str, ok := rhs.(LString); ok { + if rnum, err := parseNumber(string(str)); err == nil { + rhs = rnum + } + } + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + return numberArith(L, opcode, LNumber(v1), LNumber(v2)) + } + } + L.RaiseError(fmt.Sprintf("cannot perform %v operation between %v and %v", + strings.TrimLeft(event, "_"), lhs.Type().String(), rhs.Type().String())) + + return LNil +} + +func stringConcat(L *LState, total, last int) LValue { + rhs := L.reg.Get(last) + total-- + for i := last - 1; total > 0; { + lhs := L.reg.Get(i) + if !(LVCanConvToString(lhs) && LVCanConvToString(rhs)) { + op := L.metaOp2(lhs, rhs, "__concat") + if op.Type() == LTFunction { + L.reg.Push(op) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + rhs = L.reg.Pop() + total-- + i-- + } else { + L.RaiseError("cannot perform concat operation between %v and %v", lhs.Type().String(), rhs.Type().String()) + return LNil + } + } else { + buf := make([]string, total+1) + buf[total] = LVAsString(rhs) + for total > 0 { + lhs = L.reg.Get(i) + if !LVCanConvToString(lhs) { + break + } + buf[total-1] = LVAsString(lhs) + i-- + total-- + } + rhs = LString(strings.Join(buf, "")) + } + } + return rhs +} + +func lessThan(L *LState, lhs, rhs LValue) bool { + // optimization for numbers + if v1, ok1 := lhs.assertFloat64(); ok1 { + if v2, ok2 := rhs.assertFloat64(); ok2 { + return v1 < v2 + } + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), 
rhs.Type().String()) + } + if lhs.Type() != rhs.Type() { + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + return false + } + ret := false + switch lhs.Type() { + case LTString: + ret = strCmp(string(lhs.(LString)), string(rhs.(LString))) < 0 + default: + ret = objectRationalWithError(L, lhs, rhs, "__lt") + } + return ret +} + +func equals(L *LState, lhs, rhs LValue, raw bool) bool { + if lhs.Type() != rhs.Type() { + return false + } + + ret := false + switch lhs.Type() { + case LTNil: + ret = true + case LTNumber: + v1, _ := lhs.assertFloat64() + v2, _ := rhs.assertFloat64() + ret = v1 == v2 + case LTBool: + ret = bool(lhs.(LBool)) == bool(rhs.(LBool)) + case LTString: + ret = string(lhs.(LString)) == string(rhs.(LString)) + case LTUserData, LTTable: + if lhs == rhs { + ret = true + } else if !raw { + switch objectRational(L, lhs, rhs, "__eq") { + case 1: + ret = true + default: + ret = false + } + } + default: + ret = lhs == rhs + } + return ret +} + +func objectRationalWithError(L *LState, lhs, rhs LValue, event string) bool { + switch objectRational(L, lhs, rhs, event) { + case 1: + return true + case 0: + return false + } + L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) + return false +} + +func objectRational(L *LState, lhs, rhs LValue, event string) int { + m1 := L.metaOp1(lhs, event) + m2 := L.metaOp1(rhs, event) + if m1.Type() == LTFunction && m1 == m2 { + L.reg.Push(m1) + L.reg.Push(lhs) + L.reg.Push(rhs) + L.Call(2, 1) + if LVAsBool(L.reg.Pop()) { + return 1 + } + return 0 + } + return -1 +} diff --git a/vendor/vendor.json b/vendor/vendor.json index c8ffe9eee30..1e22566717a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -27,6 +27,72 @@ "revision": "9f32b5905fd6ce7384093f9d048437e79f7b4d85", "revisionTime": "2017-02-20T21:12:21Z" }, + { + "checksumSHA1": "fOaPbtxEyjvnQt1WjYV+qBBUa6w=", + "path": "github.com/aerospike/aerospike-client-go", + "revision": 
"0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "VPNsoCYh+XZL2JGX5LQh3JYKCKA=", + "path": "github.com/aerospike/aerospike-client-go/internal/lua", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "XbQ7dIXjGH+/wl5UyVWosqpuoys=", + "path": "github.com/aerospike/aerospike-client-go/internal/lua/resources", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "wGou8ehkc2gw/zWdteA/aq1/fOA=", + "path": "github.com/aerospike/aerospike-client-go/logger", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "W19k/UVPrMdsV1KM0VgDRLv1EvU=", + "path": "github.com/aerospike/aerospike-client-go/pkg/bcrypt", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "OIxaM040XKFig4zE9PZTTlI+s4M=", + "path": "github.com/aerospike/aerospike-client-go/pkg/ripemd160", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "hn2G2IARUZneHSbwIr7cJlHIJrM=", + "path": "github.com/aerospike/aerospike-client-go/types", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "Mja2OgcnYsZ3uUv02nixVHU4SbU=", + "path": "github.com/aerospike/aerospike-client-go/types/atomic", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "1xmzaXT3uA8K5FL92TNw/PfG0TU=", + "path": "github.com/aerospike/aerospike-client-go/types/particle_type", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "cXow2vGzsUtayrJuhz57vCDb2w0=", + "path": 
"github.com/aerospike/aerospike-client-go/types/rand", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, + { + "checksumSHA1": "BdxmIb2XgZufRGpQSlE8R9UEqWc=", + "path": "github.com/aerospike/aerospike-client-go/utils/buffer", + "revision": "0f3b54da6bdc2c31c505f9afbc5f434dd2089658", + "revisionTime": "2017-06-12T17:41:08Z" + }, { "checksumSHA1": "QZeGe47LzTo4drKys2X2DNGtPFA=", "path": "github.com/andrewkroh/sys/windows/svc/eventlog", @@ -883,6 +949,30 @@ "revisionTime": "2017-06-19T19:54:08Z", "tree": true }, + { + "checksumSHA1": "Und+nhgN1YsNWvd0aDYO+0cMcAo=", + "path": "github.com/yuin/gopher-lua", + "revision": "b402f3114ec730d8bddb074a6c137309f561aa78", + "revisionTime": "2017-04-03T16:00:31Z" + }, + { + "checksumSHA1": "yNGEI9BMDbGwabUnaHvV9ZZm/a0=", + "path": "github.com/yuin/gopher-lua/ast", + "revision": "b402f3114ec730d8bddb074a6c137309f561aa78", + "revisionTime": "2017-04-03T16:00:31Z" + }, + { + "checksumSHA1": "RzAFlqrwEE/ybIrUhDQeIE8+7pM=", + "path": "github.com/yuin/gopher-lua/parse", + "revision": "b402f3114ec730d8bddb074a6c137309f561aa78", + "revisionTime": "2017-04-03T16:00:31Z" + }, + { + "checksumSHA1": "pOY+AESgLoqczVCXNCvmPtJyUXE=", + "path": "github.com/yuin/gopher-lua/pm", + "revision": "b402f3114ec730d8bddb074a6c137309f561aa78", + "revisionTime": "2017-04-03T16:00:31Z" + }, { "checksumSHA1": "tK8eFmQ0JeKpR3P0TjiGobzlIh0=", "path": "golang.org/x/net/bpf", From cd928e234766444cbca48592ddd3477cbdb51c6e Mon Sep 17 00:00:00 2001 From: Alexander Kazakov Date: Wed, 28 Jun 2017 14:16:42 +0300 Subject: [PATCH 2/2] Update CHANGELOG.asciidoc --- CHANGELOG.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 28b49fb9814..b0df53d11d3 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -77,6 +77,7 @@ https://github.com/elastic/beats/compare/v6.0.0-alpha2...master[Check the HEAD d - Add random startup delay to each metricset to avoid the 
thundering herd problem. {issue}4010[4010] - Add the ability to configure audit rules to the kernel module. {pull}4482[4482] - Add the ability to configure kernel's audit failure mode. {pull}4516[4516] +- Add experimental Aerospike module. {pull}4560[4560] *Packetbeat*