cache-locks #1402
Changes from 1 commit
@@ -1,3 +1,4 @@
local resty_lock = require "resty.lock"
local cjson = require "cjson"
local cache = ngx.shared.cache
@@ -120,21 +121,42 @@ function _M.all_apis_by_dict_key()
end
function _M.get_or_set(key, cb)
  local lock = resty_lock:new("cache_locks", {
    exptime = 10,
    timeout = 10
  })

Review comment: I think the default value of … Secondly: the lock should not be created unless the cache lookup is a miss. Currently, this lock is created before even trying to access the cache (a few lines down).

Reply: I am handling this case. Before creating the lock we have:

    value = _M.get(key)
    if value then return value end

Reply: Where is that?

Reply: By creating the lock I mean `resty_lock:new()`.
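A minimal sketch of the ordering the reviewer suggests — check the cache before instantiating the lock — assuming the lua-resty-lock API, where `resty_lock:new` returns `nil` plus an error string on failure (this snippet is illustrative, not part of the PR):

    -- Illustrative only: look up the cache first; create and acquire
    -- the lock only when the lookup misses.
    local value = _M.get(key)
    if value then return value end

    local lock, lerr = resty_lock:new("cache_locks", {
      exptime = 10,
      timeout = 10
    })
    if not lock then
      return nil, "failed to create lock: "..lerr
    end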
  local value, err
  -- Try to get the value from the cache
  value = _M.get(key)
  if value then return value end

  -- The value is missing, acquire a lock
  local elapsed, err = lock:lock(key)
  if not elapsed then
    ngx.log(ngx.ERR, "failed to acquire cache lock: "..err)
  end

Review comment: Prefer using a comma:

    ngx.log(ngx.ERR, "failed to acquire cache lock: ", err)

It is less expensive to call `ngx.log` with multiple arguments than to concatenate the string in Lua.
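(`ngx.log` accepts a variable number of arguments and joins them on the C side, so passing `err` separately avoids building an intermediate Lua string on every call.)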
  -- Lock acquired. Since in the meantime another worker may have
  -- populated the value we have to check again
  value = _M.get(key)
  if not value then
    -- Get from closure
    value, err = cb()
    if err then
      return nil, err
    end
    if value then
      local ok, err = _M.set(key, value)
      if err then
        ngx.log(ngx.ERR, err)
      end
    end
  end

Review comment: It would be nice to stay consistent in our error handling, all across our application. I believe the previous condition (`if not ok`) …
  local ok, err = lock:unlock()
  if not ok then
    ngx.log(ngx.ERR, "failed to unlock: "..err)
  end

Review comment: Ditto: concatenation (same remark as above about `ngx.log`).

  return value
end
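For context, a call site for the new helper might look like this sketch; the cache key and the loader function below are hypothetical, not part of the diff:

    -- Hypothetical usage sketch: the callback only runs on a cache
    -- miss, and concurrent workers wait on the lock instead of all
    -- querying the datastore at once.
    local cache = require "kong.tools.database_cache"

    local api, err = cache.get_or_set("api:mockbin.com", function()
      return load_api_from_datastore("mockbin.com") -- hypothetical loader
    end)
    if err then
      ngx.log(ngx.ERR, "could not load API: ", err)
    end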
@@ -0,0 +1,84 @@
local helpers = require "spec.helpers"
local cjson = require "cjson"
local meta = require "kong.meta"

describe("Resolver", function()
  local client
  setup(function()
    helpers.kill_all()
Review comment: No need to call `helpers.kill_all()` here.
    assert(helpers.dao.apis:insert {
      request_host = "mockbin.com",
      upstream_url = "http://mockbin.com"
    })

    assert(helpers.start_kong({
      ["custom_plugins"] = "database-cache",
      lua_package_path = "?/init.lua;./kong/?.lua;./spec/fixtures/?.lua"
    }))

    -- Add the plugin
    local admin_client = helpers.admin_client()
    local res = assert(admin_client:send {
      method = "POST",
      path = "/apis/mockbin.com/plugins/",
      body = {
        name = "database-cache"
      },
      headers = {
        ["Content-Type"] = "application/json"
      }
    })
    assert.res_status(201, res)
    admin_client:close()
  end)
  teardown(function()
    helpers.stop_kong()
  end)

  it("avoids dog-pile effect", function()
    local function make_request(premature, sleep_time)
      local client = helpers.proxy_client()
      local res = assert(client:send {
        method = "GET",
        path = "/status/200?sleep="..sleep_time,
        headers = {
          ["Host"] = "mockbin.com"
        }
      })
      res:read_body()
      client:close()
    end
    local ok, err = ngx.timer.at(0, make_request, 2)
    assert.truthy(ok)

    local ok, err = ngx.timer.at(0, make_request, 5)
    assert.truthy(ok)

    local ok, err = ngx.timer.at(0, make_request, 1)
    assert.truthy(ok)

Review comment: Those 3 can basically be replaced with the proper Lua idiom:

    assert(ngx.timer.at(0, make_request, 2))
    assert(ngx.timer.at(0, make_request, 5))
    assert(ngx.timer.at(0, make_request, 1))
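(The idiom works because `assert` passes its arguments through on success and, when the first value is `nil` or `false`, raises an error with the second value — here the `err` returned by `ngx.timer.at` — as the message.)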
    helpers.wait_until(function()
      local admin_client = helpers.admin_client()
      local res = assert(admin_client:send {
        method = "GET",
        path = "/cache/invocations"
      })
      local body = res:read_body()
      admin_client:close()
      return cjson.decode(body).message == 3
    end, 10)

    -- Invocations are 3, but lookups should be 1
    local admin_client = helpers.admin_client()
    local res = assert(admin_client:send {
      method = "GET",
      path = "/cache/lookups"
    })
    local body = res:read_body()
    admin_client:close()
    assert.equal(1, cjson.decode(body).message)
  end)
end)
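In other words, the three timer-driven requests overlap: `invocations` climbs to 3, but only the request that wins the lock executes the callback and increments `lookups`; the other two find the value already cached once the lock is released.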
@@ -0,0 +1,35 @@
local BasePlugin = require "kong.plugins.base_plugin"
local cache = require "kong.tools.database_cache"

local INVOCATIONS = "invocations"
local LOOKUPS = "lookups"

local DatabaseCacheHandler = BasePlugin:extend()

DatabaseCacheHandler.PRIORITY = 1000

function DatabaseCacheHandler:new()
  DatabaseCacheHandler.super.new(self, "database-cache")
end

function DatabaseCacheHandler:init_worker()
  DatabaseCacheHandler.super.init_worker(self)

  cache.rawset(INVOCATIONS, 0)
  cache.rawset(LOOKUPS, 0)
end

function DatabaseCacheHandler:access(conf)
  DatabaseCacheHandler.super.access(self)

  cache.get_or_set("pile_effect", function()
    cache.incr(LOOKUPS, 1)
    -- Adds some delay
    ngx.sleep(tonumber(ngx.req.get_uri_args().sleep))
    return true
  end)

  cache.incr(INVOCATIONS, 1)
end

return DatabaseCacheHandler
@@ -0,0 +1,3 @@
return {
  fields = {}
}
Review comment: We do not need so many different `*_locks` shared dicts. A single one is able to hold multiple locks with different options (expiration, timeout, etc.). It will be worth cleaning this up some time soon.
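A minimal sketch of that consolidation, assuming the lua-resty-lock API: one shared dict can back many `resty.lock` instances with different options, because each lock is just a key stored in the dict (the dict name and option values below are illustrative):

    -- Illustrative only: a single "locks" shared dict serving two
    -- differently configured locks; each :lock(key) call stores its
    -- own key in the dict.
    local resty_lock = require "resty.lock"

    local cache_lock  = resty_lock:new("locks", { exptime = 10, timeout = 10 })
    local events_lock = resty_lock:new("locks", { exptime = 30, timeout = 5 })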