diff --git a/CHANGELOG.md b/CHANGELOG.md index dd50a268..221dd8de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ### Changed ### Fixed +* Fetching invalid ddl configuration (sharding key for non-existing space) + no longer breaks CRUD requests (#308, PR #309). ## [0.12.0] - 28-06-22 diff --git a/crud/common/sharding/sharding_metadata.lua b/crud/common/sharding/sharding_metadata.lua index 2716462c..7df5d0d6 100644 --- a/crud/common/sharding/sharding_metadata.lua +++ b/crud/common/sharding/sharding_metadata.lua @@ -58,12 +58,20 @@ function sharding_metadata_module.fetch_on_storage() for _, tuple in sharding_key_space:pairs() do local space_name = tuple[sharding_utils.SPACE_NAME_FIELDNO] local sharding_key_def = tuple[sharding_utils.SPACE_SHARDING_KEY_FIELDNO] - local space_format = box.space[space_name]:format() - metadata_map[space_name] = { - sharding_key_def = sharding_key_def, - sharding_key_hash = storage_cache.get_sharding_key_hash(space_name), - space_format = space_format, - } + local space = box.space[space_name] + + if space ~= nil then + local space_format = space:format() + metadata_map[space_name] = { + sharding_key_def = sharding_key_def, + sharding_key_hash = storage_cache.get_sharding_key_hash(space_name), + space_format = space_format, + } + else + log.warn('Found sharding info for %q, but space not exists. ' .. 
+ 'Ensure that you did a proper cleanup after DDL space drop.', + space_name) + end end end diff --git a/deps.sh b/deps.sh index 96a7d330..d5fd5833 100755 --- a/deps.sh +++ b/deps.sh @@ -28,5 +28,6 @@ rmdir "${TMPDIR}" tarantoolctl rocks install cartridge 2.7.4 tarantoolctl rocks install ddl 1.6.0 +tarantoolctl rocks install migrations 0.4.2 tarantoolctl rocks make diff --git a/test/entrypoint/srv_migration.lua b/test/entrypoint/srv_migration.lua new file mode 100755 index 00000000..b72e9c69 --- /dev/null +++ b/test/entrypoint/srv_migration.lua @@ -0,0 +1,46 @@ +#!/usr/bin/env tarantool + +require('strict').on() +_G.is_initialized = function() return false end + +local log = require('log') +local errors = require('errors') +local cartridge = require('cartridge') + +package.preload['customers-storage'] = function() + return { + role_name = 'customers-storage', + init = function(opts) + if opts.is_master then + box.schema.space.create('customers') + + box.space['customers']:format{ + {name = 'id', is_nullable = false, type = 'unsigned'}, + {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, + {name = 'sharding_key', is_nullable = false, type = 'unsigned'}, + } + + box.space['customers']:create_index('pk', {parts = { 'id' }}) + box.space['customers']:create_index('bucket_id', {parts = { 'bucket_id' }}) + end + end, + } +end + +local ok, err = errors.pcall('CartridgeCfgError', cartridge.cfg, { + advertise_uri = 'localhost:3301', + http_port = 8081, + bucket_count = 3000, + roles = { + 'customers-storage', + 'cartridge.roles.crud-router', + 'cartridge.roles.crud-storage', + }} +) + +if not ok then + log.error('%s', err) + os.exit(1) +end + +_G.is_initialized = cartridge.is_healthy diff --git a/test/integration/migration_test.lua b/test/integration/migration_test.lua new file mode 100644 index 00000000..eba14745 --- /dev/null +++ b/test/integration/migration_test.lua @@ -0,0 +1,65 @@ +local fio = require('fio') + +local t = require('luatest') + +local helpers = 
require('test.helper') + +local pgroup = t.group('migration', { + {engine = 'memtx'}, + {engine = 'vinyl'}, +}) + +pgroup.before_all(function(g) + g.cluster = helpers.Cluster:new({ + datadir = fio.tempdir(), + server_command = helpers.entrypoint('srv_migration'), + use_vshard = true, + replicasets = helpers.get_test_replicasets(), + env = { + ['ENGINE'] = g.params.engine, + }, + }) + g.cluster:start() +end) + +pgroup.after_all(function(g) helpers.stop_cluster(g.cluster) end) + +pgroup.test_gh_308_select_after_improper_ddl_space_drop = function(g) + -- Create a space sharded by key with ddl tools. + helpers.call_on_storages(g.cluster, function(server) + server.net_box:eval([[ + local migrator_utils = require('migrator.utils') + + if not box.info.ro then + box.schema.space.create('customers_v2') + + box.space['customers_v2']:format{ + {name = 'id_v2', is_nullable = false, type = 'unsigned'}, + {name = 'bucket_id', is_nullable = false, type = 'unsigned'}, + {name = 'sharding_key', is_nullable = false, type = 'unsigned'}, + } + + box.space['customers_v2']:create_index('pk', {parts = { 'id_v2' }}) + box.space['customers_v2']:create_index('bucket_id', {parts = { 'bucket_id' }}) + + migrator_utils.register_sharding_key('customers_v2', {'sharding_key'}) + end + ]]) + end) + + -- Do not do any requests to refresh sharding metadata. + + -- Drop space, but do not clean up ddl sharding data. + helpers.call_on_storages(g.cluster, function(server) + server.net_box:eval([[ + if not box.info.ro then + box.space['customers_v2']:drop() + end + ]]) + end) + + -- Ensure that crud request for existing space is ok. + local _, err = g.cluster.main_server.net_box:call('crud.select', + {'customers', nil, { first = 1 }}) + t.assert_equals(err, nil) +end