diff --git a/kong/api/routes/apis.lua b/kong/api/routes/apis.lua
index 0d61a68f9c81..aa0ddcf493f5 100644
--- a/kong/api/routes/apis.lua
+++ b/kong/api/routes/apis.lua
@@ -46,8 +46,7 @@ return {
     POST = function(self, dao_factory)
       crud.post(self.params, dao_factory.plugins, function(data)
-        data.signal = reports.api_signal
-        reports.send(data)
+        reports.send("api", data)
       end)
     end,
diff --git a/kong/api/routes/plugins.lua b/kong/api/routes/plugins.lua
index 4f143482220d..0ac41b390277 100644
--- a/kong/api/routes/plugins.lua
+++ b/kong/api/routes/plugins.lua
@@ -29,8 +29,7 @@ return {
     POST = function(self, dao_factory)
       crud.post(self.params, dao_factory.plugins, function(data)
-        data.signal = reports.api_signal
-        reports.send(data)
+        reports.send("api", data)
      end)
    end
  },
diff --git a/kong/core/cluster.lua b/kong/core/cluster.lua
index 70efcc82b19b..9a026c69674e 100644
--- a/kong/core/cluster.lua
+++ b/kong/core/cluster.lua
@@ -1,100 +1,145 @@
-local cache = require "kong.tools.database_cache"
 local singletons = require "kong.singletons"
+
+
+local kong_dict = ngx.shared.kong
+local timer_at = ngx.timer.at
 local ngx_log = ngx.log
+local ERR = ngx.ERR
+local DEBUG = ngx.DEBUG
 
-local resty_lock
-local status, res = pcall(require, "resty.lock")
-if status then
-  resty_lock = res
-end
 
 local KEEPALIVE_INTERVAL = 30
-local ASYNC_AUTOJOIN_INTERVAL = 3
-local ASYNC_AUTOJOIN_RETRIES = 20 -- Try for max a minute (3s * 20)
+local KEEPALIVE_KEY = "events:keepalive"
+local AUTOJOIN_INTERVAL = 3
+local AUTOJOIN_KEY = "events:autojoin"
+local AUTOJOIN_MAX_RETRIES = 20 -- Try for max a minute (3s * 20)
+local AUTOJOIN_MAX_RETRIES_KEY = "autojoin_retries"
+
+
+local function log(lvl, ...)
+  ngx_log(lvl, "[cluster] ", ...)
+end
+
+
+-- Hold a lock for the whole interval (exptime) to prevent multiple
+-- worker processes from sending the test request simultaneously.
+-- Other workers do not need to wait until this lock is released,
+-- and can ignore the event, knowing another worker is handling it.
+-- We subtract 1ms from the exp time to prevent a race condition
+-- with the next timer event.
+local function get_lock(key, exptime)
+  local ok, err = kong_dict:safe_add(key, true, exptime - 0.001)
+  if not ok and err ~= "exists" then
+    log(ERR, "could not get lock from 'kong' shm: ", err)
+  end
+
+  return ok
+end
+
 
-local function create_timer(at, cb)
-  local ok, err = ngx.timer.at(at, cb)
+local function create_timer(...)
+  local ok, err = timer_at(...)
   if not ok then
-    ngx_log(ngx.ERR, "[cluster] failed to create timer: ", err)
+    log(ERR, "could not create timer: ", err)
   end
 end
 
-local function async_autojoin(premature)
-  if premature then return end
+
+local function autojoin_handler(premature)
+  if premature then
+    return
+  end
+
+  -- increase retry count by 1
+
+  local n_retries, err = kong_dict:incr(AUTOJOIN_MAX_RETRIES_KEY, 1, 0)
+  if err then
+    log(ERR, "could not increment number of auto-join retries in 'kong' ",
+             "shm: ", err)
+    return
+  end
+
+  -- register recurring retry timer
+
+  if n_retries < AUTOJOIN_MAX_RETRIES then
+    -- all workers need to register a recurring timer, in case one of them
+    -- crashes. Hence, this must be called before the `get_lock()` call.
+    create_timer(AUTOJOIN_INTERVAL, autojoin_handler)
+  end
+
+  if not get_lock(AUTOJOIN_KEY, AUTOJOIN_INTERVAL) then
+    return
+  end
+
+  -- auto-join nodes table
 
   -- If this node is the only node in the cluster, but other nodes are present, then try to join them
   -- This usually happens when two nodes are started very fast, and the first node didn't write his
   -- information into the datastore yet. When the second node starts up, there is nothing to join yet.
-  local lock, err = resty_lock:new("cluster_autojoin_locks", {
-    exptime = ASYNC_AUTOJOIN_INTERVAL - 0.001
-  })
-  if not lock then
-    ngx_log(ngx.ERR, "could not create lock: ", err)
-    return
-  end
-  local elapsed = lock:lock("async_autojoin")
-  if elapsed and elapsed == 0 then
-    -- If the current member count on this node's cluster is 1, but there are more than 1 active nodes in
-    -- the DAO, then try to join them
-    local count, err = singletons.dao.nodes:count()
+  log(DEBUG, "auto-joining")
+
+  -- If the current member count on this node's cluster is 1, but there are more than 1 active nodes in
+  -- the DAO, then try to join them
+  local count, err = singletons.dao.nodes:count()
+  if err then
+    log(ERR, err)
+
+  elseif count > 1 then
+    local members, err = singletons.serf:members()
     if err then
-      ngx_log(ngx.ERR, tostring(err))
-    elseif count > 1 then
-      local members, err = singletons.serf:members()
+      log(ERR, err)
+
+    elseif #members < 2 then
+      -- Trigger auto-join
+      local _, err = singletons.serf:autojoin()
       if err then
-        ngx_log(ngx.ERR, tostring(err))
-      elseif #members < 2 then
-        -- Trigger auto-join
-        local _, err = singletons.serf:autojoin()
-        if err then
-          ngx_log(ngx.ERR, tostring(err))
-        end
-      else
-        return -- The node is already in the cluster and no need to continue
+        log(ERR, err)
       end
-    end
-
-    -- Create retries counter key if it doesn't exist
-    if not cache.get(cache.autojoin_retries_key()) then
-      cache.rawset(cache.autojoin_retries_key(), 0)
-    end
-    local autojoin_retries = cache.incr(cache.autojoin_retries_key(), 1) -- Increment retries counter
-    if (autojoin_retries < ASYNC_AUTOJOIN_RETRIES) then
-      create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin)
+
+    else
+      return -- The node is already in the cluster and no need to continue
     end
   end
 end
 
-local function send_keepalive(premature)
-  if premature then return end
-
-  local lock = resty_lock:new("cluster_locks", {
-    exptime = KEEPALIVE_INTERVAL - 0.001
-  })
-  local elapsed = lock:lock("keepalive")
-  if elapsed and elapsed == 0 then
-    -- Send keepalive
-    local nodes, err = singletons.dao.nodes:find_all {
-      name = singletons.serf.node_name
-    }
+
+local function keepalive_handler(premature)
+  if premature then
+    return
+  end
+
+  -- all workers need to register a recurring timer, in case one of them
+  -- crashes. Hence, this must be called before the `get_lock()` call.
+  create_timer(KEEPALIVE_INTERVAL, keepalive_handler)
+
+  if not get_lock(KEEPALIVE_KEY, KEEPALIVE_INTERVAL) then
+    return
+  end
+
+  log(DEBUG, "sending keepalive event to datastore")
+
+  local nodes, err = singletons.dao.nodes:find_all {
+    name = singletons.serf.node_name
+  }
+  if err then
+    log(ERR, "could not retrieve nodes from datastore: ", err)
+
+  elseif #nodes == 1 then
+    local node = nodes[1]
+    local _, err = singletons.dao.nodes:update(node, node, {
+      ttl = singletons.configuration.cluster_ttl_on_failure,
+      quiet = true
+    })
    if err then
-      ngx_log(ngx.ERR, tostring(err))
-    elseif #nodes == 1 then
-      local node = nodes[1]
-      local _, err = singletons.dao.nodes:update(node, node, {ttl=singletons.configuration.cluster_ttl_on_failure})
-      if err then
-        ngx_log(ngx.ERR, tostring(err))
-      end
+      log(ERR, "could not update node in datastore: ", err)
     end
   end
-
-  create_timer(KEEPALIVE_INTERVAL, send_keepalive)
 end
 
+
 return {
   init_worker = function()
-    create_timer(KEEPALIVE_INTERVAL, send_keepalive)
-    create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin) -- Only execute one time
+    create_timer(KEEPALIVE_INTERVAL, keepalive_handler)
+    create_timer(AUTOJOIN_INTERVAL, autojoin_handler)
  end
}
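The autojoin and keepalive handlers above share one idiom: every worker re-arms its own recurring timer, then races for a shm key, and only the `safe_add()` winner does the actual work for that interval. A minimal sketch of the pattern, under stated assumptions (hypothetical `LOCK_KEY`/`do_work` names; requires the `lua_shared_dict kong 4m;` already declared in the Nginx template):

    local kong_dict = ngx.shared.kong

    local INTERVAL = 30
    local LOCK_KEY = "events:example" -- hypothetical key

    local function handler(premature)
      if premature then
        return -- timer cancelled: the worker is shutting down
      end

      -- re-arm first, in every worker, so the schedule survives any
      -- single worker crashing
      ngx.timer.at(INTERVAL, handler)

      -- safe_add() is atomic across workers: exactly one caller per
      -- interval succeeds; the others get "exists" back and skip this
      -- round without blocking (unlike a resty.lock :lock() call)
      local ok, err = kong_dict:safe_add(LOCK_KEY, true, INTERVAL - 0.001)
      if not ok then
        if err ~= "exists" then
          ngx.log(ngx.ERR, "could not acquire lock: ", err)
        end
        return
      end

      -- only the lock winner reaches this point
      -- do_work()
    end

The 1ms subtracted from the expiry makes the key vanish just before the next timer round, so the next interval's race is never lost to a stale lock.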
diff --git a/kong/core/reports.lua b/kong/core/reports.lua
index 1f9cba47b83b..b039994baf31 100644
--- a/kong/core/reports.lua
+++ b/kong/core/reports.lua
@@ -1,156 +1,202 @@
-local meta = require "kong.meta"
-local cjson = require "cjson"
-local cache = require "kong.tools.database_cache"
+local cjson = require "cjson.safe"
 local utils = require "kong.tools.utils"
-local pl_utils = require "pl.utils"
-local pl_stringx = require "pl.stringx"
-local resty_lock = require "resty.lock"
 local singletons = require "kong.singletons"
 local constants = require "kong.constants"
-local concat = table.concat
+
+
+local kong_dict = ngx.shared.kong
 local udp_sock = ngx.socket.udp
+local timer_at = ngx.timer.at
+local ngx_log = ngx.log
+local concat = table.concat
+local tostring = tostring
+local pairs = pairs
+local type = type
+local ERR = ngx.ERR
+local sub = string.sub
 
-local ping_handler, system_infos
-local enabled = false
-local ping_interval = 3600
-local unique_str = utils.random_string()
 
---------
--- utils
---------
+local PING_INTERVAL = 3600
+local PING_KEY = "events:reports"
+local BUFFERED_REQUESTS_COUNT_KEYS = "events:requests"
 
-local function log_error(...)
-  ngx.log(ngx.WARN, "[reports] ", ...)
-end
 
-local function get_system_infos()
-  local infos = {
-    version = meta._VERSION
-  }
+local _buffer = {}
+local _enabled = false
+local _unique_str = utils.random_string()
+local _buffer_immutable_idx
 
-  local ok, _, stdout = pl_utils.executeex("getconf _NPROCESSORS_ONLN")
-  if ok then
-    infos.cores = tonumber(stdout:sub(1, -2))
-  end
-  ok, _, stdout = pl_utils.executeex("hostname")
-  if ok then
-    infos.hostname = stdout:sub(1, -2)
-  end
-  ok, _, stdout = pl_utils.executeex("uname -a")
-  if ok then
-    infos.uname = stdout:gsub(";", ","):sub(1, -2)
+
+do
+  -- initialize immutable buffer data (the same for each report)
+
+  local meta = require "kong.meta"
+
+  local system_infos = utils.get_system_infos()
+
+  -- <14>: syslog facility code 'log alert'
+  _buffer[#_buffer + 1] = "<14>version=" .. meta._VERSION
+
+  for k, v in pairs(system_infos) do
+    _buffer[#_buffer + 1] = k .. "=" .. v
  end
-  return infos
+
+  _buffer_immutable_idx = #_buffer -- max idx for immutable slots
+end
+
+
+local function log(lvl, ...)
+  ngx_log(lvl, "[reports] ", ...)
 end
 
-system_infos = get_system_infos()
 
 -------------
 -- UDP logger
 -------------
 
-local function send(t, host, port)
-  if not enabled then return end
+
+local function send_report(signal_type, t, host, port)
+  if not _enabled then
+    return
+  elseif type(signal_type) ~= "string" then
+    return error("signal_type (arg #1) must be a string", 2)
+  end
+
   t = t or {}
   host = host or constants.SYSLOG.ADDRESS
   port = port or constants.SYSLOG.PORT
 
-  local buf = {}
-  for k, v in pairs(system_infos) do
-    buf[#buf+1] = k.."="..v
-  end
+  -- add signal type to data
+
+  t.signal = signal_type
+
+  -- insert given entity in mutable part of buffer
+
+  local mutable_idx = _buffer_immutable_idx
 
-  -- entity formatting
   for k, v in pairs(t) do
-    if not pl_stringx.endswith(k, "id") and k ~= "created_at" then
+    if k ~= "created_at" and sub(k, -2) ~= "id" then
       if type(v) == "table" then
-        v = cjson.encode(v)
+        local json, err = cjson.encode(v)
+        if err then
+          log(ERR, "could not JSON encode given table entity: ", err)
+        end
+
+        v = json
       end
-      buf[#buf+1] = k.."="..tostring(v)
+
+      mutable_idx = mutable_idx + 1
+      _buffer[mutable_idx] = k .. "=" .. tostring(v)
     end
   end
-  local msg = concat(buf, ";")
 
   local sock = udp_sock()
   local ok, err = sock:setpeername(host, port)
   if not ok then
-    log_error("could not set peer name for UDP socket: ", err)
+    log(ERR, "could not set peer name for UDP socket: ", err)
     return
   end
 
   sock:settimeout(1000)
 
-  ok, err = sock:send("<14>"..msg) -- syslog facility code 'log alert'
+  -- concat and send buffer
+
+  --print(concat(_buffer, ";", 1, mutable_idx))
+
+  ok, err = sock:send(concat(_buffer, ";", 1, mutable_idx))
   if not ok then
-    log_error("could not send data: ", err)
+    log(ERR, "could not send data: ", err)
  end
 
  ok, err = sock:close()
  if not ok then
-    log_error("could not close socket: ", err)
+    log(ERR, "could not close socket: ", err)
  end
end
 
----------------
--- ping handler
----------------
 
-local function create_ping_timer()
-  local ok, err = ngx.timer.at(ping_interval, ping_handler)
+-- ping timer handler
+
+
+-- Hold a lock for the whole interval (exptime) to prevent multiple
+-- worker processes from sending the test request simultaneously.
+-- Other workers do not need to wait until this lock is released,
+-- and can ignore the event, knowing another worker is handling it.
+-- We subtract 1ms from the exp time to prevent a race condition
+-- with the next timer event.
+local function get_lock(key, exptime)
+  local ok, err = kong_dict:safe_add(key, true, exptime - 0.001)
+  if not ok and err ~= "exists" then
+    log(ERR, "could not get lock from 'kong' shm: ", err)
+  end
+
+  return ok
+end
+
+
+local function create_timer(...)
+  local ok, err = timer_at(...)
   if not ok then
-    log_error("failed to create ping timer: ", err)
+    log(ERR, "could not create ping timer: ", err)
  end
end
 
-ping_handler = function(premature)
-  if premature then return end
 
-  local lock, err = resty_lock:new("reports_locks", {
-    exptime = ping_interval - 0.001
-  })
-  if not lock then
-    log_error("could not create lock: ", err)
+local function ping_handler(premature)
+  if premature then
+    return
+  end
+
+  -- all workers need to register a recurring timer, in case one of them
+  -- crashes. Hence, this must be called before the `get_lock()` call.
+  create_timer(PING_INTERVAL, ping_handler)
+
+  if not get_lock(PING_KEY, PING_INTERVAL) then
    return
  end
 
-  local elapsed, err = lock:lock("ping")
-  if not elapsed then
-    log_error("failed to acquire ping lock: ", err)
-  elseif elapsed == 0 then
-    send {
-      signal = "ping",
-      requests = cache.get(cache.requests_key()) or 0,
-      unique_id = unique_str,
-      database = singletons.configuration.database
-    }
-    cache.rawset(cache.requests_key(), 0)
+  local n_requests, err = kong_dict:get(BUFFERED_REQUESTS_COUNT_KEYS)
+  if err then
+    log(ERR, "could not get buffered requests count from 'kong' shm: ", err)
+  elseif not n_requests then
+    n_requests = 0
  end
 
-  create_ping_timer()
+  send_report("ping", {
+    requests = n_requests,
+    unique_id = _unique_str,
+    database = singletons.configuration.database
+  })
+
+  local ok, err = kong_dict:incr(BUFFERED_REQUESTS_COUNT_KEYS, -n_requests, n_requests)
+  if not ok then
+    log(ERR, "could not reset buffered requests count in 'kong' shm: ", err)
+  end
end
 
+
return {
-  -----------------
   -- plugin handler
-  -----------------
 
  init_worker = function()
-    if not enabled then return end
-    cache.rawset(cache.requests_key(), 0)
-    create_ping_timer()
+    if not _enabled then
+      return
+    end
+
+    create_timer(PING_INTERVAL, ping_handler)
  end,
  log = function()
-    if not enabled then return end
-    cache.incr(cache.requests_key(), 1)
+    if not _enabled then
+      return
+    end
+
+    local ok, err = kong_dict:incr(BUFFERED_REQUESTS_COUNT_KEYS, 1, 0)
+    if not ok then
+      log(ERR, "could not increment buffered requests count in 'kong' shm: ",
+          err)
+    end
  end,
 
-  -----------------
+  -- custom methods
-  -----------------
 
  toggle = function(enable)
-    enabled = enable
+    _enabled = enable
  end,
 
-  get_system_infos = get_system_infos,
-  send = send,
-  api_signal = "api"
+  send = send_report,
}
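The new module keeps one reusable buffer: slots `1.._buffer_immutable_idx` hold the static fields (version, host infos) built once at load time, and each `send_report()` call only overwrites the tail, so `concat(_buffer, ";", 1, mutable_idx)` serializes exactly the slots valid for this report. The request counter, meanwhile, is reset with a negative `incr()` rather than a `set(0)`, so increments performed by other workers between the `get()` and the reset are not lost. A minimal sketch of that counter idiom (the `KEY` name is hypothetical; assumes an ngx_lua version whose shdict `incr()` accepts the `init` third argument, as the diff itself relies on):

    local kong_dict = ngx.shared.kong
    local KEY = "events:example_count" -- hypothetical key

    -- hot path, e.g. once per proxied request: atomic increment,
    -- seeding the key to 0 on first use via the `init` argument
    kong_dict:incr(KEY, 1, 0)

    -- reporting path: read the current total, report it, then subtract
    -- exactly what was read -- concurrent increments survive the reset
    local n = kong_dict:get(KEY) or 0
    -- send_report("ping", { requests = n })  -- report `n` here
    local newval, err = kong_dict:incr(KEY, -n, n)
    if not newval then
      ngx.log(ngx.ERR, "could not reset counter: ", err)
    end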
diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua
index 116ac5332ee9..6454e91b5a8f 100644
--- a/kong/templates/nginx_kong.lua
+++ b/kong/templates/nginx_kong.lua
@@ -36,9 +36,6 @@ lua_max_running_timers 4096;
 lua_max_pending_timers 16384;
 lua_shared_dict kong 4m;
 lua_shared_dict cache ${{MEM_CACHE_SIZE}};
-lua_shared_dict reports_locks 100k;
-lua_shared_dict cluster_locks 100k;
-lua_shared_dict cluster_autojoin_locks 100k;
 lua_shared_dict cache_locks 100k;
 lua_shared_dict cassandra 1m;
 lua_shared_dict cassandra_prepared 5m;
diff --git a/kong/tools/database_cache.lua b/kong/tools/database_cache.lua
index 5bae4643401d..7a78023414b3 100644
--- a/kong/tools/database_cache.lua
+++ b/kong/tools/database_cache.lua
@@ -15,9 +15,6 @@ local CACHE_KEYS = {
   OAUTH2_TOKEN = "oauth2_token",
   ACLS = "acls",
   SSL = "ssl",
-  REQUESTS = "requests",
-  AUTOJOIN_RETRIES = "autojoin_retries",
-  TIMERS = "timers",
   ALL_APIS_BY_DIC = "ALL_APIS_BY_DIC",
   LDAP_CREDENTIAL = "ldap_credentials",
   BOT_DETECTION = "bot_detection"
@@ -62,14 +59,6 @@ function _M.delete_all()
   cache:flush_expired() -- This does actually remove the elements from the memory
 end
 
-function _M.requests_key()
-  return CACHE_KEYS.REQUESTS
-end
-
-function _M.autojoin_retries_key()
-  return CACHE_KEYS.AUTOJOIN_RETRIES
-end
-
 function _M.api_key(host)
   return CACHE_KEYS.APIS..":"..host
 end
diff --git a/kong/tools/utils.lua b/kong/tools/utils.lua
index 1822e6f65952..abb18d8f7a69 100644
--- a/kong/tools/utils.lua
+++ b/kong/tools/utils.lua
@@ -8,10 +8,8 @@
 -- @license [Apache 2.0](https://opensource.org/licenses/Apache-2.0)
 -- @module kong.tools.utils
 
-local url = require "socket.url"
 local ffi = require "ffi"
 local uuid = require "resty.jit-uuid"
-local pl_stringx = require "pl.stringx"
 
 local C = ffi.C
 local ffi_new = ffi.new
@@ -66,6 +64,34 @@ function _M.get_hostname()
   return result
 end
 
+do
+  local pl_utils = require "pl.utils"
+
+  local _system_infos
+
+  function _M.get_system_infos()
+    if _system_infos then
+      return _system_infos
+    end
+
+    _system_infos = {
+      hostname = _M.get_hostname()
+    }
+
+    local ok, _, stdout = pl_utils.executeex("getconf _NPROCESSORS_ONLN")
+    if ok then
+      _system_infos.cores = tonumber(stdout:sub(1, -2))
+    end
+
+    ok, _, stdout = pl_utils.executeex("uname -a")
+    if ok then
+      _system_infos.uname = stdout:gsub(";", ","):sub(1, -2)
+    end
+
+    return _system_infos
+  end
+end
+
 do
   local bytes_buf_t = ffi.typeof "char[?]"
@@ -118,66 +144,74 @@ end
 -- return str == "00000000-0000-0000-0000-000000000000" or uuid.is_valid(str)
 --end
 
-_M.split = pl_stringx.split
-_M.strip = pl_stringx.strip
+do
+  local pl_stringx = require "pl.stringx"
 
---- URL escape and format key and value
--- An obligatory url.unescape pass must be done to prevent double-encoding
--- already encoded values (which contain a '%' character that `url.escape` escapes)
-local function encode_args_value(key, value, raw)
-  if not raw then
-    key = url.unescape(key)
-    key = url.escape(key)
-  end
-  if value ~= nil then
-    if not raw then
-      value = url.unescape(value)
-      value = url.escape(value)
-    end
-    return fmt("%s=%s", key, value)
-  else
-    return key
-  end
+  _M.split = pl_stringx.split
+  _M.strip = pl_stringx.strip
 end
 
--- Encode a Lua table to a querystring
--- Tries to mimic ngx_lua's `ngx.encode_args`, but also percent-encode querystring values.
--- Supports multi-value query args, boolean values.
--- It also supports encoding for bodies (only because it is used in http_client for specs.
--- @TODO drop and use `ngx.encode_args` once it implements percent-encoding.
--- @see https://github.com/Mashape/kong/issues/749
--- @param[type=table] args A key/value table containing the query args to encode.
--- @param[type=boolean] raw If true, will not percent-encode any key/value and will ignore special boolean rules.
--- @treturn string A valid querystring (without the prefixing '?')
-function _M.encode_args(args, raw)
-  local query = {}
-  local keys = {}
-
-  for k in pairs(args) do
-    keys[#keys+1] = k
+do
+  local url = require "socket.url"
+
+  --- URL escape and format key and value
+  -- An obligatory url.unescape pass must be done to prevent double-encoding
+  -- already encoded values (which contain a '%' character that `url.escape` escapes)
+  local function encode_args_value(key, value, raw)
+    if not raw then
+      key = url.unescape(key)
+      key = url.escape(key)
+    end
+    if value ~= nil then
+      if not raw then
+        value = url.unescape(value)
+        value = url.escape(value)
+      end
+      return fmt("%s=%s", key, value)
+    else
+      return key
+    end
   end
 
-  sort(keys)
+  --- Encode a Lua table to a querystring
+  -- Tries to mimic ngx_lua's `ngx.encode_args`, but also percent-encode querystring values.
+  -- Supports multi-value query args, boolean values.
+  -- It also supports encoding for bodies (only because it is used in http_client for specs).
+  -- @TODO drop and use `ngx.encode_args` once it implements percent-encoding.
+  -- @see https://github.com/Mashape/kong/issues/749
+  -- @param[type=table] args A key/value table containing the query args to encode.
+  -- @param[type=boolean] raw If true, will not percent-encode any key/value and will ignore special boolean rules.
+  -- @treturn string A valid querystring (without the prefixing '?')
+  function _M.encode_args(args, raw)
+    local query = {}
+    local keys = {}
+
+    for k in pairs(args) do
+      keys[#keys+1] = k
+    end
 
-  for _, key in ipairs(keys) do
-    local value = args[key]
-    if type(value) == "table" then
-      for _, sub_value in ipairs(value) do
-        query[#query+1] = encode_args_value(key, sub_value, raw)
-      end
-    elseif value == true then
-      query[#query+1] = encode_args_value(key, raw and true or nil, raw)
-    elseif value ~= false and value ~= nil or raw then
-      value = tostring(value)
-      if value ~= "" then
-        query[#query+1] = encode_args_value(key, value, raw)
-      elseif raw or value == "" then
-        query[#query+1] = key
+    sort(keys)
+
+    for _, key in ipairs(keys) do
+      local value = args[key]
+      if type(value) == "table" then
+        for _, sub_value in ipairs(value) do
+          query[#query+1] = encode_args_value(key, sub_value, raw)
+        end
+      elseif value == true then
+        query[#query+1] = encode_args_value(key, raw and true or nil, raw)
+      elseif value ~= false and value ~= nil or raw then
+        value = tostring(value)
+        if value ~= "" then
+          query[#query+1] = encode_args_value(key, value, raw)
+        elseif raw or value == "" then
+          query[#query+1] = key
+        end
       end
     end
-  end
 
-  return concat(query, "&")
+    return concat(query, "&")
+  end
 end
 
 --- Checks whether a request is https or was originally https (but already terminated).
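Moving the system-info probing into `kong.tools.utils` also memoizes it: `_system_infos` is a private upvalue of the `do` block, so the shell commands (`getconf`, `uname`) run once per Lua VM and every later call returns the same table. A usage sketch of that contract:

    local utils = require "kong.tools.utils"

    -- first call shells out to getconf/uname and caches the result
    local infos = utils.get_system_infos()
    print(infos.hostname, infos.cores, infos.uname)

    -- later calls return the very same table (reference equality),
    -- which is exactly what the new unit test below asserts
    assert(utils.get_system_infos() == infos)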
diff --git a/spec/01-unit/04-utils_spec.lua b/spec/01-unit/04-utils_spec.lua
index 7eb565c7d3cd..a804d1fff0b5 100644
--- a/spec/01-unit/04-utils_spec.lua
+++ b/spec/01-unit/04-utils_spec.lua
@@ -2,8 +2,27 @@ local utils = require "kong.tools.utils"
 
 describe("Utils", function()
 
-  it("should retrieve the hostname", function()
-    assert.truthy(utils.get_hostname())
+  describe("get_hostname()", function()
+    it("should retrieve the hostname", function()
+      assert.is_string(utils.get_hostname())
+    end)
+  end)
+
+  describe("get_system_infos()", function()
+    it("retrieves various host infos", function()
+      local infos = utils.get_system_infos()
+      assert.is_number(infos.cores)
+      assert.is_string(infos.hostname)
+      assert.is_string(infos.uname)
+      assert.not_matches("\n$", infos.hostname)
+      assert.not_matches("\n$", infos.uname)
+    end)
+    it("caches the result", function()
+      assert.equal(
+        utils.get_system_infos(),
+        utils.get_system_infos()
+      )
+    end)
   end)
 
   describe("is_valid_uuid()", function()
diff --git a/spec/01-unit/05-reports_spec.lua b/spec/01-unit/05-reports_spec.lua
deleted file mode 100644
index 2a5333565d84..000000000000
--- a/spec/01-unit/05-reports_spec.lua
+++ /dev/null
@@ -1,14 +0,0 @@
-local reports = require "kong.core.reports"
-
-describe("reports", function()
-  describe("get_system_infos()", function()
-    it("gets infos about current host", function()
-      local infos = reports.get_system_infos()
-      assert.is_number(infos.cores)
-      assert.is_string(infos.hostname)
-      assert.is_string(infos.uname)
-      assert.not_matches("\n$", infos.hostname)
-      assert.not_matches("\n$", infos.uname)
-    end)
-  end)
-end)
diff --git a/spec/01-unit/11-database_cache_spec.lua b/spec/01-unit/11-database_cache_spec.lua
index b0ebd20f42b6..bdfdf9fe3546 100644
--- a/spec/01-unit/11-database_cache_spec.lua
+++ b/spec/01-unit/11-database_cache_spec.lua
@@ -27,8 +27,4 @@ describe("Database cache", function()
     assert.are.equal("jwtauth_credentials:hello", cache.jwtauth_credential_key("hello"))
   end)
 
-  it("returns a valid requests cache key", function()
-    assert.are.equal("requests", cache.requests_key())
-  end)
-
 end)
diff --git a/spec/02-integration/04-core/01-reports_spec.lua b/spec/02-integration/04-core/01-reports_spec.lua
index 177b98fdbd72..169fee7aacd9 100644
--- a/spec/02-integration/04-core/01-reports_spec.lua
+++ b/spec/02-integration/04-core/01-reports_spec.lua
@@ -10,7 +10,7 @@ describe("reports", function()
   it("sends report over UDP", function()
     local thread = helpers.udp_server(8189)
 
-    reports.send({
+    reports.send("stub", {
       hello = "world",
       foo = "bar"
     }, "127.0.0.1", 8189)
@@ -21,10 +21,11 @@
     res = res:sub(5)
     assert.matches("cores=%d+", res)
     assert.matches("uname=[%w]+", res)
-    assert.matches("version="..meta._VERSION, res)
+    assert.matches("version=" .. meta._VERSION, res, nil, true)
     assert.matches("hostname=[%w]+", res)
-    assert.matches("foo=bar", res)
-    assert.matches("hello=world", res)
+    assert.matches("foo=bar", res, nil, true)
+    assert.matches("hello=world", res, nil, true)
+    assert.matches("signal=stub", res, nil, true)
   end)
   it("doesn't send if not enabled", function()
     reports.toggle(false)
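The extra `nil, true` arguments passed to `assert.matches()` above are luassert's `init` and `plain` modifiers: with `plain = true` the needle is compared as a literal substring rather than a Lua pattern. That matters most for the `version=` assertion, since version strings contain `.`, a pattern wildcard; presumably the already-literal fields (`foo=bar`, `hello=world`, `signal=stub`) were switched for consistency. A small illustration with hypothetical values:

    local res = "signal=stub;version=0.9.0"

    -- as a pattern, "." matches any character, so this would also
    -- accept e.g. "version=0x9y0"
    assert.matches("version=0.9.0", res)

    -- as a plain find (init = nil, plain = true), only the literal
    -- substring passes
    assert.matches("version=0.9.0", res, nil, true)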
diff --git a/spec/fixtures/custom_nginx.template b/spec/fixtures/custom_nginx.template
index 8212bdc4035e..d3c578c281e8 100644
--- a/spec/fixtures/custom_nginx.template
+++ b/spec/fixtures/custom_nginx.template
@@ -33,9 +33,6 @@ http {
     lua_max_running_timers 4096;
     lua_max_pending_timers 16384;
     lua_shared_dict cache ${{MEM_CACHE_SIZE}};
-    lua_shared_dict reports_locks 100k;
-    lua_shared_dict cluster_locks 100k;
-    lua_shared_dict cluster_autojoin_locks 100k;
     lua_shared_dict cassandra 1m;
     lua_shared_dict cassandra_prepared 5m;
     lua_socket_log_errors off;