From 2e8e089e8f528ab1c6e9e96dee23e63531a480fc Mon Sep 17 00:00:00 2001
From: Georgy Moiseev <moiseev.georgii@gmail.com>
Date: Fri, 22 Dec 2023 11:38:42 +0300
Subject: [PATCH] crud: support vshard identification by name

In vshard 0.1.25, new feature related to vshard configuration and
storage info was introduced [1]. If the new mode is used, crud module
fails to bootstrap and work in several places. This feature is enabled
by Tarantool 3.0 if vshard cluster was configured with 3.0 config.

After this patch, it is possible to bootstrap a vshard cluster with
new configuration mode. We run all existing vshard tests with new modes
after this patch. Module code was updated to support both possible modes.

1. https://github.com/tarantool/vshard/issues/426

Closes #403
---
 CHANGELOG.md                       |   6 ++
 crud.lua                           |  18 +----
 crud/common/utils.lua              |  45 +++++++++---
 crud/readview.lua                  |   6 +-
 test/helper.lua                    |  68 ++++++++++++++----
 test/performance/perf_test.lua     |   6 +-
 test/unit/call_test.lua            |   4 +-
 test/unit/not_initialized_test.lua |   2 +-
 test/unit/stats_test.lua           |   1 +
 test/vshard_helpers/server.lua     |  17 +++++
 test/vshard_helpers/vtest.lua      | 110 +++++++++++++++++++++++------
 11 files changed, 213 insertions(+), 70 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f9a0bc1..35aa519c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
+## Unreleased
+
+### Fixed
+* Compatibility with vshard 0.1.25 `name_as_key` identification mode
+  for Tarantool 3.0 (#403).
+
 ## [1.4.1] - 23-10-23
 
 ### Changed
diff --git a/crud.lua b/crud.lua
index eb250cb1..5d1e1e1d 100644
--- a/crud.lua
+++ b/crud.lua
@@ -23,7 +23,6 @@ local stats = require('crud.stats')
 local readview = require('crud.readview')
 local schema = require('crud.schema')
 
-local vshard = require('vshard')
 local luri = require('uri')
 
 local crud = {}
@@ -174,25 +173,14 @@ function crud.init_storage()
 
     local user = nil
     if not box.info.ro then
-        local ok, storage_info = pcall(vshard.storage.info)
-        if not ok then
-            error('vshard.storage.cfg() must be called first')
-        end
+        local replicaset_uuid, replicaset = utils.get_self_vshard_replicaset()
 
-        local box_info = box.info()
-        local replicaset_uuid
-        if box_info.replicaset ~= nil then
-            replicaset_uuid = box_info.replicaset.uuid
-        else
-            replicaset_uuid = box_info.cluster.uuid
-        end
-        local replicaset_info = storage_info.replicasets[replicaset_uuid]
-        if replicaset_info == nil or replicaset_info.master == nil then
+        if replicaset == nil or replicaset.master == nil then
             error(string.format('Failed to find a vshard configuration for ' ..
                 ' replicaset with replicaset_uuid %s.',
                 replicaset_uuid))
         end
-        user = luri.parse(replicaset_info.master.uri).login or 'guest'
+        user = luri.parse(replicaset.master.uri).login or 'guest'
     end
 
     if rawget(_G, '_crud') == nil then
diff --git a/crud/common/utils.lua b/crud/common/utils.lua
index 1c7f8aeb..7d624d1f 100644
--- a/crud/common/utils.lua
+++ b/crud/common/utils.lua
@@ -112,10 +112,10 @@ function utils.format_replicaset_error(replicaset_uuid, msg, ...)
 end
 
 local function get_replicaset_by_replica_uuid(replicasets, uuid)
-    for replicaset_uuid, replicaset in pairs(replicasets) do
-        for replica_uuid, _ in pairs(replicaset.replicas) do
-            if replica_uuid == uuid then
-                return replicasets[replicaset_uuid]
+    for _, replicaset in pairs(replicasets) do
+        for _, replica in pairs(replicaset.replicas) do
+            if replica.uuid == uuid then
+                return replicaset
             end
         end
     end
@@ -1143,23 +1143,23 @@ function utils.storage_info(opts)
     local timeout = opts.timeout or const.DEFAULT_VSHARD_CALL_TIMEOUT
 
     for _, replicaset in pairs(replicasets) do
-        for replica_uuid, replica in pairs(replicaset.replicas) do
-            replica_state_by_uuid[replica_uuid] = {
+        for _, replica in pairs(replicaset.replicas) do
+            replica_state_by_uuid[replica.uuid] = {
                 status = "error",
                 is_master = replicaset.master == replica
             }
             local ok, res = pcall(replica.conn.call, replica.conn, CRUD_STORAGE_INFO_FUNC_NAME,
                                   {}, async_opts)
             if ok then
-                futures_by_replicas[replica_uuid] = res
+                futures_by_replicas[replica.uuid] = res
             else
-                local err_msg = string.format("Error getting storage info for %s", replica_uuid)
+                local err_msg = string.format("Error getting storage info for %s", replica.uuid)
                 if res ~= nil then
                     log.error("%s: %s", err_msg, res)
-                    replica_state_by_uuid[replica_uuid].message = tostring(res)
+                    replica_state_by_uuid[replica.uuid].message = tostring(res)
                 else
                     log.error(err_msg)
-                    replica_state_by_uuid[replica_uuid].message = err_msg
+                    replica_state_by_uuid[replica.uuid].message = err_msg
                 end
             end
         end
@@ -1314,4 +1314,29 @@ function utils.is_cartridge_hotreload_supported()
     return true, cartridge_hotreload
 end
 
+function utils.get_self_vshard_replicaset()
+    local box_info = box.info()
+
+    local ok, storage_info = pcall(vshard.storage.info)
+    assert(ok, 'vshard.storage.cfg() must be called first')
+
+    local replicaset_uuid
+    if box_info.replicaset ~= nil then
+        replicaset_uuid = box_info.replicaset.uuid
+    else
+        replicaset_uuid = box_info.cluster.uuid
+    end
+
+    local replicaset
+    -- Identification key may be name since vshard 0.1.25.
+    -- See also https://github.com/tarantool/vshard/issues/460.
+    for _, v in pairs(storage_info.replicasets) do
+        if v.uuid == replicaset_uuid then
+            replicaset = v
+        end
+    end
+
+    return replicaset_uuid, replicaset
+end
+
 return utils
diff --git a/crud/readview.lua b/crud/readview.lua
index 19742175..e5c5ece0 100644
--- a/crud/readview.lua
+++ b/crud/readview.lua
@@ -260,16 +260,16 @@ function Readview_obj:close(opts)
 
     local errors = {}
     for _, replicaset in pairs(replicasets) do
-        for replica_uuid, replica in pairs(replicaset.replicas) do
+        for _, replica in pairs(replicaset.replicas) do
             for _, value in pairs(self._uuid) do
-                if replica_uuid == value.uuid then
+                if replica.uuid == value.uuid then
                     local replica_result, replica_err = replica.conn:call(CRUD_CLOSE_FUNC_NAME,
                     {self._uuid}, {timeout = opts.timeout})
                     if replica_err ~= nil then
                         table.insert(errors, ReadviewError:new("Failed to close Readview on storage: %s", replica_err))
                     end
                     if replica_err == nil and (not replica_result) then
-                        table.insert(errors, ReadviewError:new("Readview was not found on storage: %s", replica_uuid))
+                        table.insert(errors, ReadviewError:new("Readview was not found on storage: %s", replica.uuid))
                     end
                 end
             end
diff --git a/test/helper.lua b/test/helper.lua
index 576c84e1..3d044f70 100644
--- a/test/helper.lua
+++ b/test/helper.lua
@@ -232,7 +232,7 @@ end
 
 function helpers.get_test_vshard_sharding()
     local sharding = {
-        {
+        ['s-1'] = {
             replicas = {
                 ['s1-master'] = {
                     instance_uuid = helpers.uuid('b', 1),
@@ -243,7 +243,7 @@ function helpers.get_test_vshard_sharding()
                 },
             },
         },
-        {
+        ['s-2'] = {
             replicas = {
                 ['s2-master'] = {
                     instance_uuid = helpers.uuid('c', 1),
@@ -361,12 +361,12 @@ function helpers.get_other_storage_bucket_id(cluster, bucket_id)
 
         local replicasets = vshard.router.routeall()
 
-        local other_replicaset_uuid
-        for replicaset_uuid, replicaset in pairs(replicasets) do
+        local other_replicaset
+        for _, replicaset in pairs(replicasets) do
             local stat, err = replicaset:callrw('vshard.storage.bucket_stat', {bucket_id})
 
             if err ~= nil and err.name == 'WRONG_BUCKET' then
-                other_replicaset_uuid = replicaset_uuid
+                other_replicaset = replicaset
                 break
             end
 
@@ -378,13 +378,8 @@ function helpers.get_other_storage_bucket_id(cluster, bucket_id)
             end
         end
 
-        if other_replicaset_uuid == nil then
-            return nil, 'Other replicaset is not found'
-        end
-
-        local other_replicaset = replicasets[other_replicaset_uuid]
         if other_replicaset == nil then
-            return nil, string.format('Replicaset %s not found', other_replicaset_uuid)
+            return nil, 'Other replicaset is not found'
         end
 
         local buckets_info = other_replicaset:callrw('vshard.storage.buckets_info')
@@ -734,7 +729,7 @@ function helpers.start_cluster(g, cartridge_cfg, vshard_cfg)
         local cfg = table.deepcopy(vshard_cfg)
         cfg.engine = g.params.engine
 
-        g.cfg = vtest.config_new(cfg)
+        g.cfg = vtest.config_new(cfg, g.params.backend_cfg)
         vtest.cluster_new(g, g.cfg)
         g.cfg.engine = nil
     end
@@ -756,14 +751,57 @@ function helpers.get_router(cluster, backend)
     end
 end
 
+function helpers.parse_module_version(str)
+    -- https://github.com/tarantool/luatest/blob/f37b353b77be50a1f1ce87c1ff2edf0c1b96d5d1/luatest/utils.lua#L166-L173
+    local splitstr = str:split('.')
+    local major = tonumber(splitstr[1]:match('%d+'))
+    local minor = tonumber(splitstr[2]:match('%d+'))
+    local patch = tonumber(splitstr[3]:match('%d+'))
+    return luatest_utils.version(major, minor, patch)
+end
+
+function helpers.is_name_supported_as_vshard_id()
+    local vshard_version = helpers.parse_module_version(require('vshard')._VERSION)
+    local is_vshard_supports = luatest_utils.version_ge(vshard_version,
+        luatest_utils.version(0, 1, 25))
+
+    local tarantool_version = luatest_utils.get_tarantool_version()
+    local is_tarantool_supports = luatest_utils.version_ge(tarantool_version,
+        luatest_utils.version(3, 0, 0))
+    return is_vshard_supports and is_tarantool_supports
+end
+
 function helpers.backend_matrix(base_matrix)
     base_matrix = base_matrix or {{}}
-    local backends = {helpers.backend.VSHARD, helpers.backend.CARTRIDGE}
+    local backend_params = {
+        {
+            backend = helpers.backend.CARTRIDGE,
+            backend_cfg = nil,
+        },
+    }
+
+    if helpers.is_name_supported_as_vshard_id() then
+        table.insert(backend_params, {
+            backend = helpers.backend.VSHARD,
+            backend_cfg = {identification_mode = 'uuid_as_key'},
+        })
+        table.insert(backend_params, {
+            backend = helpers.backend.VSHARD,
+            backend_cfg = {identification_mode = 'name_as_key'},
+        })
+    else
+        table.insert(backend_params, {
+            backend = helpers.backend.VSHARD,
+            backend_cfg = nil,
+        })
+    end
+
     local matrix = {}
-    for _, backend in ipairs(backends) do
+    for _, params in ipairs(backend_params) do
         for _, base in ipairs(base_matrix) do
             base = table.deepcopy(base)
-            base.backend = backend
+            base.backend = params.backend
+            base.backend_cfg = params.backend_cfg
             table.insert(matrix, base)
         end
     end
diff --git a/test/performance/perf_test.lua b/test/performance/perf_test.lua
index 7f8ecc8a..cfaeb540 100644
--- a/test/performance/perf_test.lua
+++ b/test/performance/perf_test.lua
@@ -24,7 +24,7 @@ end
 
 local vshard_cfg_template = {
     sharding = {
-        {
+        ['s-1'] = {
             replicas = {
                 ['s1-master'] = {
                     master = true,
@@ -32,7 +32,7 @@ local vshard_cfg_template = {
                 ['s1-replica'] = {},
             },
         },
-        {
+        ['s-2'] = {
             replicas = {
                 ['s2-master'] = {
                     master = true,
@@ -40,7 +40,7 @@ local vshard_cfg_template = {
                 ['s2-replica'] = {},
             },
         },
-        {
+        ['s-3'] = {
             replicas = {
                 ['s3-master'] = {
                     master = true,
diff --git a/test/unit/call_test.lua b/test/unit/call_test.lua
index 3a5f16a4..9405e200 100644
--- a/test/unit/call_test.lua
+++ b/test/unit/call_test.lua
@@ -8,7 +8,7 @@ local pgroup = t.group('call', helpers.backend_matrix())
 
 local vshard_cfg_template = {
     sharding = {
-        {
+        ['s-1'] = {
             replicas = {
                 ['s1-master'] = {
                     master = true,
@@ -16,7 +16,7 @@ local vshard_cfg_template = {
                 ['s1-replica'] = {},
             },
         },
-        {
+        ['s-2'] = {
             replicas = {
                 ['s2-master'] = {
                     master = true,
diff --git a/test/unit/not_initialized_test.lua b/test/unit/not_initialized_test.lua
index 772ef861..cdd6ae20 100644
--- a/test/unit/not_initialized_test.lua
+++ b/test/unit/not_initialized_test.lua
@@ -9,7 +9,7 @@ local pgroup = t.group('not-initialized', helpers.backend_matrix({
 
 local vshard_cfg_template = {
     sharding = {
-        {
+        storages = {
             replicas = {
                 storage = {
                     master = true,
diff --git a/test/unit/stats_test.lua b/test/unit/stats_test.lua
index a66bf8ad..4681ccf3 100644
--- a/test/unit/stats_test.lua
+++ b/test/unit/stats_test.lua
@@ -45,6 +45,7 @@ local function enable_stats(g, params)
     if params ~= nil then
         params = table.deepcopy(params)
         params.backend = nil
+        params.backend_cfg = nil
     end
     g.router:eval("stats_module.enable(...)", { params })
 end
diff --git a/test/vshard_helpers/server.lua b/test/vshard_helpers/server.lua
index 272efa2b..2e172401 100644
--- a/test/vshard_helpers/server.lua
+++ b/test/vshard_helpers/server.lua
@@ -204,6 +204,23 @@ function Server:replicaset_uuid()
     return uuid
 end
 
+function Server:replicaset_name()
+    -- Cache the value when it is found for the first time.
+    if self.replicaset_name_value then
+        return self.replicaset_name_value
+    end
+    local name = self:exec(function()
+        local info = box.info
+        if info.replicaset then
+            return info.replicaset.name
+        end
+        return nil
+    end)
+
+    self.replicaset_name_value = name
+    return name
+end
+
 function Server:election_term()
     return self:exec(function() return box.info.election.term end)
 end
diff --git a/test/vshard_helpers/vtest.lua b/test/vshard_helpers/vtest.lua
index b0662010..0097d919 100644
--- a/test/vshard_helpers/vtest.lua
+++ b/test/vshard_helpers/vtest.lua
@@ -87,15 +87,18 @@ end
 -- Build a valid vshard config by a template. A template does not specify
 -- anything volatile such as URIs, UUIDs - these are installed at runtime.
 --
-local function config_new(templ)
+local function config_new(templ, additional_cfg)
+    additional_cfg = additional_cfg or {}
+
     local res = table.deepcopy(templ)
     local sharding = {}
     res.sharding = sharding
     -- Is supposed to intensify reconnects when replication and listen URIs
     -- change.
     res.replication_timeout = 0.1
-    for i, replicaset_templ in pairs(templ.sharding) do
-        local replicaset_uuid = replicaset_name_to_uuid(i)
+    res.identification_mode = additional_cfg.identification_mode
+    for replicaset_name, replicaset_templ in pairs(templ.sharding) do
+        local replicaset_uuid = replicaset_name_to_uuid(replicaset_name)
         local replicas = {}
         local replicaset = table.deepcopy(replicaset_templ)
         replicaset.replicas = replicas
@@ -110,7 +113,6 @@ local function config_new(templ)
             replica.port_uri = nil
             replica.port_count = nil
             replica.instance_uuid = nil
-            replica.name = replica_name
 
             local port_count = replica_templ.port_count
             local creds = 'storage:storage@'
@@ -146,10 +148,25 @@ local function config_new(templ)
                     }
                 }
             end
-            replicas[replica_uuid] = replica
+
+            if res.identification_mode == 'name_as_key' then
+                replica.uuid = replica_uuid
+                replicas[replica_name] = replica
+            else
+                replica.name = replica_name
+                replicas[replica_uuid] = replica
+            end
+        end
+
+        if res.identification_mode == 'name_as_key' then
+            replicaset.uuid = replicaset_uuid
+            sharding[replicaset_name] = replicaset
+        else
+            replicaset.name = replicaset_name
+            sharding[replicaset_uuid] = replicaset
         end
-        sharding[replicaset_uuid] = replicaset
     end
+
     return res
 end
 
@@ -222,15 +239,38 @@ local function cluster_bootstrap(g, cfg)
     local masters = {}
     local etalon_balance = {}
     local replicaset_count = 0
-    for rs_uuid, rs in pairs(cfg.sharding) do
+
+    for rs_id, rs in pairs(cfg.sharding) do
         local is_master_found = false
-        for _, rep in pairs(rs.replicas) do
+
+        local rs_uuid
+        if cfg.identification_mode == 'name_as_key' then
+            rs_uuid = rs.uuid
+        else
+            rs_uuid = rs_id
+        end
+
+        for rep_id, rep in pairs(rs.replicas) do
             if rep.master then
                 t.assert(not is_master_found, 'only one master')
-                local server = g.cluster[rep.name]
-                t.assert_not_equals(server, nil, 'find master instance')
-                t.assert_equals(server:replicaset_uuid(), rs_uuid,
-                                'replicaset uuid')
+
+                local rep_name
+                if cfg.identification_mode == 'name_as_key' then
+                    rep_name = rep_id
+                else
+                    rep_name = rep.name
+                end
+
+                local server = g.cluster[rep_name]
+                t.assert_not_equals(server, nil, ('find master instance for %s'):format(rep_name))
+
+                if cfg.identification_mode == 'name_as_key' then
+                    t.assert_equals(server:replicaset_name(), rs_id,
+                                    'replicaset name')
+                else
+                    t.assert_equals(server:replicaset_uuid(), rs_id,
+                                    'replicaset uuid')
+                end
                 masters[rs_uuid] = server
                 is_master_found = true
             end
@@ -334,7 +374,7 @@ local function cluster_new(g, cfg)
     cfg.all_init = nil
     cfg.crud_init = nil
 
-    for replicaset_uuid, replicaset in pairs(cfg.sharding) do
+    for replicaset_id, replicaset in pairs(cfg.sharding) do
         -- Luatest depends on box.cfg being ready and listening. Need to
         -- configure it before vshard.storage.cfg().
         local box_repl = {}
@@ -346,11 +386,24 @@ local function cluster_new(g, cfg)
             -- Speed retries up.
             replication_timeout = 0.1,
         }
-        for replica_uuid, replica in pairs(replicaset.replicas) do
-            local name = replica.name
-            box_cfg.instance_uuid = replica_uuid
-            box_cfg.replicaset_uuid = replicaset_uuid
-            box_cfg.listen = instance_uri(replica.name)
+        for replica_id, replica in pairs(replicaset.replicas) do
+            local name
+
+            if cfg.identification_mode == 'name_as_key' then
+                name = replica_id
+
+                box_cfg.instance_uuid = replica.uuid
+                box_cfg.replicaset_uuid = replicaset.uuid
+                box_cfg.instance_name = replica_id
+                box_cfg.replicaset_name = replicaset_id
+            else
+                name = replica.name
+
+                box_cfg.instance_uuid = replica_id
+                box_cfg.replicaset_uuid = replicaset_id
+            end
+
+            box_cfg.listen = instance_uri(name)
             -- Need to specify read-only explicitly to know how is master.
             box_cfg.read_only = not replica.master
             box_cfg.memtx_use_mvcc_engine = cfg.memtx_use_mvcc_engine
@@ -364,7 +417,7 @@ local function cluster_new(g, cfg)
             -- VShard specific details to use in various helper functions.
             server.vtest = {
                 name = name,
-                replicaset = replicaset_uuid,
+                replicaset = replicaset_id,
                 is_storage = true,
                 master = replica.master,
             }
@@ -392,7 +445,15 @@ local function cluster_new(g, cfg)
             box.session.su('admin')
 
             cfg.engine = nil
-            require('vshard.storage').cfg(cfg, box.info.uuid)
+
+            local id
+            if cfg.identification_mode == 'name_as_key' then
+                id = box.info.name
+            else
+                id = box.info.uuid
+            end
+            require('vshard.storage').cfg(cfg, id)
+
             box.schema.user.grant('storage', 'write,read', 'universe')
 
             box.session.su(user)
@@ -403,7 +464,14 @@ local function cluster_new(g, cfg)
         replica:wait_for_readiness()
         replica:exec(function(cfg)
             cfg.engine = nil
-            require('vshard.storage').cfg(cfg, box.info.uuid)
+
+            local id
+            if cfg.identification_mode == 'name_as_key' then
+                id = box.info.name
+            else
+                id = box.info.uuid
+            end
+            require('vshard.storage').cfg(cfg, id)
         end, {cfg})
     end