From 55879eed9d57be7d3c2f74fa6c22910b5b8ce1e8 Mon Sep 17 00:00:00 2001 From: Lchangliang <915311741@qq.com> Date: Mon, 8 Jul 2024 19:27:03 +0800 Subject: [PATCH] (cloud-merge) Supports online capacity expansion and contraction --- .../http/action/clear_file_cache_action.cpp | 40 --- be/src/http/action/clear_file_cache_action.h | 32 -- be/src/http/action/file_cache_action.cpp | 54 +++- be/src/io/cache/block_file_cache.cpp | 69 +++- be/src/io/cache/block_file_cache.h | 10 +- be/src/io/cache/block_file_cache_factory.cpp | 19 +- be/src/io/cache/block_file_cache_factory.h | 9 + be/src/io/cache/file_cache_common.cpp | 1 + be/src/olap/options.cpp | 9 +- be/src/service/http_service.cpp | 14 +- be/test/io/cache/block_file_cache_test.cpp | 88 +++++- .../cache/compaction/test_stale_rowset.groovy | 6 +- .../cache/http/test_clear_cache.groovy | 5 +- .../cache/http/test_clear_cache_async.groovy | 5 +- .../cache/http/test_reset_capacity.groovy | 297 ++++++++++++++++++ .../read_write/sync_insert.groovy | 6 +- .../read_write/test_multi_stale_rowset.groovy | 4 +- .../cluster/test_warm_up_cluster.groovy | 4 +- .../cluster/test_warm_up_cluster_batch.groovy | 6 +- .../test_warm_up_cluster_bigsize.groovy | 6 +- .../cluster/test_warm_up_cluster_empty.groovy | 6 +- .../table/test_warm_up_partition.groovy | 6 +- .../warm_up/table/test_warm_up_table.groovy | 4 +- .../warm_up/table/test_warm_up_tables.groovy | 6 +- .../cloud_p0/cache/ttl/alter_ttl_1.groovy | 6 +- .../cloud_p0/cache/ttl/alter_ttl_2.groovy | 6 +- .../cloud_p0/cache/ttl/alter_ttl_3.groovy | 6 +- .../cloud_p0/cache/ttl/alter_ttl_4.groovy | 6 +- .../cache/ttl/alter_ttl_max_int64.groovy | 6 +- .../cache/ttl/alter_ttl_random.groovy | 6 +- .../cache/ttl/alter_ttl_seconds.groovy | 6 +- .../cache/ttl/create_table_as_select.groovy | 6 +- .../cache/ttl/create_table_like.groovy | 6 +- .../suites/cloud_p0/cache/ttl/test_ttl.groovy | 6 +- .../cache/ttl/test_ttl_preempt.groovy | 6 +- 35 files changed, 598 insertions(+), 174 deletions(-) delete mode 100644 be/src/http/action/clear_file_cache_action.cpp delete mode 100644 be/src/http/action/clear_file_cache_action.h create mode 100644 regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy diff --git a/be/src/http/action/clear_file_cache_action.cpp b/be/src/http/action/clear_file_cache_action.cpp deleted file mode 100644 index 6a4a2517508824..00000000000000 --- a/be/src/http/action/clear_file_cache_action.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -#include "http/action/clear_file_cache_action.h" - -#include - -#include "common/logging.h" -#include "http/http_channel.h" -#include "http/http_headers.h" -#include "http/http_request.h" -#include "io/cache/block_file_cache_factory.h" - -namespace doris { - -const std::string SYNC = "sync"; - -void ClearFileCacheAction::handle(HttpRequest* req) { - req->add_output_header(HttpHeaders::CONTENT_TYPE, "application/json"); - std::string sync = req->param(SYNC); - auto ret = - io::FileCacheFactory::instance()->clear_file_caches(sync == "TRUE" || sync == "true"); - HttpChannel::send_reply(req, HttpStatus::OK, ret); -} - -} // namespace doris diff --git a/be/src/http/action/clear_file_cache_action.h b/be/src/http/action/clear_file_cache_action.h deleted file mode 100644 index 25ebdd7cb5efab..00000000000000 --- a/be/src/http/action/clear_file_cache_action.h +++ /dev/null @@ -1,32 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#pragma once - -#include "http/http_handler.h" - -namespace doris { -class ExecEnv; -class ClearFileCacheAction : public HttpHandler { -public: - ClearFileCacheAction() = default; - - ~ClearFileCacheAction() override = default; - - void handle(HttpRequest* req) override; -}; -} // namespace doris diff --git a/be/src/http/action/file_cache_action.cpp b/be/src/http/action/file_cache_action.cpp index cee37f2115d6c7..acad2b3b7bf96c 100644 --- a/be/src/http/action/file_cache_action.cpp +++ b/be/src/http/action/file_cache_action.cpp @@ -33,25 +33,59 @@ namespace doris { -const static std::string HEADER_JSON = "application/json"; -const static std::string OP = "op"; +constexpr static std::string_view HEADER_JSON = "application/json"; +constexpr static std::string_view OP = "op"; +constexpr static std::string_view SYNC = "sync"; +constexpr static std::string_view PATH = "path"; +constexpr static std::string_view CLEAR = "clear"; +constexpr static std::string_view RESET = "reset"; +constexpr static std::string_view CAPACITY = "capacity"; +constexpr static std::string_view RELEASE = "release"; +constexpr static std::string_view BASE_PATH = "base_path"; +constexpr static std::string_view RELEASED_ELEMENTS = "released_elements"; Status FileCacheAction::_handle_header(HttpRequest* req, std::string* json_metrics) { - req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.c_str()); - std::string operation = req->param(OP); - if (operation == "release") { + req->add_output_header(HttpHeaders::CONTENT_TYPE, HEADER_JSON.data()); + std::string operation = req->param(OP.data()); + Status st = Status::OK(); + if (operation == RELEASE) { size_t released = 0; - if (req->param("base_path") != "") { - released = io::FileCacheFactory::instance()->try_release(req->param("base_path")); + const std::string& base_path = 
req->param(BASE_PATH.data()); + if (!base_path.empty()) { + released = io::FileCacheFactory::instance()->try_release(base_path); } else { released = io::FileCacheFactory::instance()->try_release(); } EasyJson json; - json["released_elements"] = released; + json[RELEASED_ELEMENTS.data()] = released; *json_metrics = json.ToString(); - return Status::OK(); + } else if (operation == CLEAR) { + const std::string& sync = req->param(SYNC.data()); + auto ret = io::FileCacheFactory::instance()->clear_file_caches(to_lower(sync) == "true"); + } else if (operation == RESET) { + Status st; + std::string capacity = req->param(CAPACITY.data()); + int64_t new_capacity = 0; + bool parse = true; + try { + new_capacity = std::stoll(capacity); + } catch (...) { + parse = false; + } + if (!parse || new_capacity <= 0) { + st = Status::InvalidArgument( + "The capacity {} failed to be parsed, the capacity needs to be in " + "the interval (0, INT64_MAX]", + capacity); + } else { + const std::string& path = req->param(PATH.data()); + auto ret = io::FileCacheFactory::instance()->reset_capacity(path, new_capacity); + LOG(INFO) << ret; + } + } else { + st = Status::InternalError("invalid operation: {}", operation); } - return Status::InternalError("invalid operation: {}", operation); + return st; } void FileCacheAction::handle(HttpRequest* req) { diff --git a/be/src/io/cache/block_file_cache.cpp b/be/src/io/cache/block_file_cache.cpp index a2ec9f82c863c3..33858e9ac538ee 100644 --- a/be/src/io/cache/block_file_cache.cpp +++ b/be/src/io/cache/block_file_cache.cpp @@ -1408,11 +1408,72 @@ int disk_used_percentage(const std::string& path, std::pair* percent) return 0; } -void BlockFileCache::check_disk_resource_limit(const std::string& path) { +std::string BlockFileCache::reset_capacity(size_t new_capacity) { + using namespace std::chrono; + int64_t space_released = 0; + size_t old_capacity = 0; + std::stringstream ss; + ss << "finish reset_capacity, path=" << _cache_base_path; + auto start_time = steady_clock::time_point(); + { + std::lock_guard cache_lock(_mutex); + if (new_capacity < _capacity && new_capacity < _cur_cache_size) { + int64_t need_remove_size = _cur_cache_size - new_capacity; + auto remove_blocks = [&](LRUQueue& queue) -> int64_t { + int64_t queue_released = 0; + for (const auto& [entry_key, entry_offset, entry_size] : queue) { + if (need_remove_size <= 0) return queue_released; + auto* cell = get_cell(entry_key, entry_offset, cache_lock); + if (!cell->releasable()) continue; + cell->is_deleted = true; + need_remove_size -= entry_size; + space_released += entry_size; + queue_released += entry_size; + } + return queue_released; + }; + int64_t queue_released = remove_blocks(_disposable_queue); + ss << " disposable_queue released " << queue_released; + queue_released = remove_blocks(_normal_queue); + ss << " normal_queue released " << queue_released; + queue_released = remove_blocks(_index_queue); + ss << " index_queue released " << queue_released; + if (need_remove_size >= 0) { + queue_released = 0; + for (auto& [_, key] : _time_to_key) { + for (auto& [_, cell] : _files[key]) { + if (need_remove_size <= 0) break; + cell.is_deleted = true; + need_remove_size -= cell.file_block->range().size(); + space_released += cell.file_block->range().size(); + queue_released += cell.file_block->range().size(); + } + } + ss << " ttl_queue released " << queue_released; + } + _disk_resource_limit_mode = true; + _async_clear_file_cache = true; + ss << " total_space_released=" << space_released; + } + old_capacity = _capacity; 
+ _capacity = new_capacity; + } + auto use_time = duration_cast(steady_clock::time_point() - start_time); + LOG(INFO) << "Finish tag deleted block. path=" << _cache_base_path + << " use_time=" << static_cast(use_time.count()); + ss << " old_capacity=" << old_capacity << " new_capacity=" << new_capacity; + LOG(INFO) << ss.str(); + return ss.str(); +} + +void BlockFileCache::check_disk_resource_limit() { + if (_capacity > _cur_cache_size) { + _disk_resource_limit_mode = false; + } std::pair percent; - int ret = disk_used_percentage(path, &percent); + int ret = disk_used_percentage(_cache_base_path, &percent); if (ret != 0) { - LOG_ERROR("").tag("file cache path", path).tag("error", strerror(errno)); + LOG_ERROR("").tag("file cache path", _cache_base_path).tag("error", strerror(errno)); return; } auto [capacity_percentage, inode_percentage] = percent; @@ -1452,7 +1513,7 @@ void BlockFileCache::run_background_operation() { int64_t interval_time_seconds = 20; while (!_close) { TEST_SYNC_POINT_CALLBACK("BlockFileCache::set_sleep_time", &interval_time_seconds); - check_disk_resource_limit(_cache_base_path); + check_disk_resource_limit(); { std::unique_lock close_lock(_close_mtx); _close_cv.wait_for(close_lock, std::chrono::seconds(interval_time_seconds)); diff --git a/be/src/io/cache/block_file_cache.h b/be/src/io/cache/block_file_cache.h index c9668b236c8198..cafb57f9a1ec3c 100644 --- a/be/src/io/cache/block_file_cache.h +++ b/be/src/io/cache/block_file_cache.h @@ -95,6 +95,14 @@ class BlockFileCache { */ std::string clear_file_cache_async(); std::string clear_file_cache_directly(); + + /** + * Reset the cache capacity. If the new_capacity is smaller than _capacity, the redundant data will be removed asynchronously. + * + * @returns summary message + */ + std::string reset_capacity(size_t new_capacity); + std::map get_blocks_by_key(const UInt128Wrapper& hash); /// For debug.
std::string dump_structure(const UInt128Wrapper& hash); @@ -358,7 +366,7 @@ class BlockFileCache { size_t get_used_cache_size_unlocked(FileCacheType type, std::lock_guard& cache_lock) const; - void check_disk_resource_limit(const std::string& path); + void check_disk_resource_limit(); size_t get_available_cache_size_unlocked(FileCacheType type, std::lock_guard& cache_lock) const; diff --git a/be/src/io/cache/block_file_cache_factory.cpp b/be/src/io/cache/block_file_cache_factory.cpp index a6df98c686dcce..2c15d440be1aa8 100644 --- a/be/src/io/cache/block_file_cache_factory.cpp +++ b/be/src/io/cache/block_file_cache_factory.cpp @@ -83,8 +83,8 @@ Status FileCacheFactory::create_file_cache(const std::string& cache_base_path, size_t disk_capacity = static_cast( static_cast(stat.f_blocks) * static_cast(stat.f_bsize) * (static_cast(config::file_cache_enter_disk_resource_limit_mode_percent) / 100)); - if (disk_capacity < file_cache_settings.capacity) { - LOG_INFO("The cache {} config size {} is larger than {}% disk size {}, recalc it.", + if (file_cache_settings.capacity == 0 || disk_capacity < file_cache_settings.capacity) { + LOG_INFO("The cache {} config size {} is larger than {}% disk size {} or zero, recalc it.", cache_base_path, file_cache_settings.capacity, config::file_cache_enter_disk_resource_limit_mode_percent, disk_capacity); file_cache_settings = @@ -143,5 +143,20 @@ std::vector FileCacheFactory::get_base_paths() { return paths; } +std::string FileCacheFactory::reset_capacity(const std::string& path, int64_t new_capacity) { + if (path.empty()) { + std::stringstream ss; + for (auto& [_, cache] : _path_to_cache) { + ss << cache->reset_capacity(new_capacity); + } + return ss.str(); + } else { + if (auto iter = _path_to_cache.find(path); iter != _path_to_cache.end()) { + return iter->second->reset_capacity(new_capacity); + } + } + return "Unknown the cache path " + path; +} + } // namespace io } // namespace doris diff --git a/be/src/io/cache/block_file_cache_factory.h b/be/src/io/cache/block_file_cache_factory.h index 696dae6fdc5582..6365fab31057ac 100644 --- a/be/src/io/cache/block_file_cache_factory.h +++ b/be/src/io/cache/block_file_cache_factory.h @@ -70,6 +70,15 @@ class FileCacheFactory { std::vector get_base_paths(); + /** + * Clears data of all file cache instances + * + * @param path file cache absolute path + * @param new_capacity + * @return summary message + */ + std::string reset_capacity(const std::string& path, int64_t new_capacity); + FileCacheFactory() = default; FileCacheFactory& operator=(const FileCacheFactory&) = delete; FileCacheFactory(const FileCacheFactory&) = delete; diff --git a/be/src/io/cache/file_cache_common.cpp b/be/src/io/cache/file_cache_common.cpp index 3ce647b4a0d704..61e873e04c6cbe 100644 --- a/be/src/io/cache/file_cache_common.cpp +++ b/be/src/io/cache/file_cache_common.cpp @@ -30,6 +30,7 @@ FileCacheSettings get_file_cache_settings(size_t capacity, size_t max_query_cach size_t normal_percent, size_t disposable_percent, size_t index_percent) { io::FileCacheSettings settings; + if (capacity == 0) return settings; settings.capacity = capacity; settings.max_file_block_size = config::file_cache_each_block_size; settings.max_query_cache_size = max_query_cache_size; diff --git a/be/src/olap/options.cpp b/be/src/olap/options.cpp index bf472b6ef52bd6..cd53e6c0b1ffa9 100644 --- a/be/src/olap/options.cpp +++ b/be/src/olap/options.cpp @@ -221,7 +221,7 @@ Status parse_conf_cache_paths(const std::string& config_path, std::vectorregister_handler(HttpMethod::GET, 
"/api/meta/{op}/{tablet_id}", meta_action); - FileCacheAction* file_cache_action = _pool.add(new FileCacheAction()); - _ev_http_server->register_handler(HttpMethod::GET, "/api/file_cache", file_cache_action); - ConfigAction* update_config_action = _pool.add(new ConfigAction(ConfigActionType::UPDATE_CONFIG)); _ev_http_server->register_handler(HttpMethod::POST, "/api/update_config", update_config_action); @@ -303,9 +299,8 @@ void HttpService::register_local_handler(StorageEngine& engine) { _ev_http_server->register_handler(HttpMethod::HEAD, "/api/_binlog/_download", download_binlog_action); - ClearFileCacheAction* clear_file_cache_action = _pool.add(new ClearFileCacheAction()); - _ev_http_server->register_handler(HttpMethod::POST, "/api/clear_file_cache", - clear_file_cache_action); + FileCacheAction* file_cache_action = _pool.add(new FileCacheAction()); + _ev_http_server->register_handler(HttpMethod::POST, "/api/file_cache", file_cache_action); TabletsDistributionAction* tablets_distribution_action = _pool.add(new TabletsDistributionAction(_env, engine, TPrivilegeHier::GLOBAL, @@ -400,9 +395,8 @@ void HttpService::register_cloud_handler(CloudStorageEngine& engine) { _ev_http_server->register_handler(HttpMethod::GET, "/api/injection_point/{op}", injection_point_action); #endif - ClearFileCacheAction* clear_file_cache_action = _pool.add(new ClearFileCacheAction()); - _ev_http_server->register_handler(HttpMethod::POST, "/api/clear_file_cache", - clear_file_cache_action); + FileCacheAction* file_cache_action = _pool.add(new FileCacheAction()); + _ev_http_server->register_handler(HttpMethod::GET, "/api/file_cache", file_cache_action); auto* show_hotspot_action = _pool.add(new ShowHotspotAction(engine)); _ev_http_server->register_handler(HttpMethod::GET, "/api/hotspot/tablet", show_hotspot_action); diff --git a/be/test/io/cache/block_file_cache_test.cpp b/be/test/io/cache/block_file_cache_test.cpp index 7cbc54d095a703..e4a52c0258970a 100644 --- a/be/test/io/cache/block_file_cache_test.cpp +++ b/be/test/io/cache/block_file_cache_test.cpp @@ -182,7 +182,7 @@ TEST_F(BlockFileCacheTest, init) { { "path" : "/mnt/ssd01/clickbench/hot/be/file_cache", "total_size" : "193273528320", - "query_limit" : 38654705664 + "query_limit" : -1 } ] )"); @@ -194,14 +194,19 @@ TEST_F(BlockFileCacheTest, init) { [ { "path" : "/mnt/ssd01/clickbench/hot/be/file_cache", - "normal" : 193273528320, - "persistent" : 193273528320, - "query_limit" : "38654705664" + "total_size" : -1 } ] )"); cache_paths.clear(); EXPECT_FALSE(parse_conf_cache_paths(err_string, cache_paths)); + + err_string = std::string(R"( + [ + ] + )"); + cache_paths.clear(); + EXPECT_FALSE(parse_conf_cache_paths(err_string, cache_paths)); } void test_file_cache(io::FileCacheType cache_type) { @@ -4239,4 +4244,79 @@ TEST_F(BlockFileCacheTest, ttl_reserve_with_evict_using_lru) { } } +TEST_F(BlockFileCacheTest, reset_capacity) { + if (fs::exists(cache_base_path)) { + fs::remove_all(cache_base_path); + } + fs::create_directories(cache_base_path); + TUniqueId query_id; + query_id.hi = 1; + query_id.lo = 1; + io::FileCacheSettings settings; + settings.query_queue_size = 30; + settings.query_queue_elements = 5; + settings.index_queue_size = 30; + settings.index_queue_elements = 5; + settings.disposable_queue_size = 30; + settings.disposable_queue_elements = 5; + settings.capacity = 90; + settings.max_file_block_size = 30; + settings.max_query_cache_size = 30; + io::CacheContext context; + context.query_id = query_id; + auto key = io::BlockFileCache::hash("key1"); + 
auto key2 = io::BlockFileCache::hash("key2"); + io::BlockFileCache cache(cache_base_path, settings); + auto sp = SyncPoint::get_instance(); + Defer defer {[sp] { + sp->clear_call_back("BlockFileCache::set_remove_batch"); + sp->clear_call_back("BlockFileCache::set_sleep_time"); + }}; + sp->set_call_back("BlockFileCache::set_sleep_time", + [](auto&& args) { *try_any_cast(args[0]) = 1; }); + sp->set_call_back("BlockFileCache::set_remove_batch", + [](auto&& args) { *try_any_cast(args[0]) = 2; }); + sp->enable_processing(); + ASSERT_TRUE(cache.initialize()); + for (int i = 0; i < 100; i++) { + if (cache.get_lazy_open_success()) { + break; + }; + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + for (int64_t offset = 0; offset < 45; offset += 5) { + context.cache_type = static_cast((offset / 5) % 3); + auto holder = cache.get_or_set(key, offset, 5, context); + auto segments = fromHolder(holder); + ASSERT_EQ(segments.size(), 1); + assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4), + io::FileBlock::State::EMPTY); + ASSERT_TRUE(segments[0]->get_or_set_downloader() == io::FileBlock::get_caller_id()); + download(segments[0]); + assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4), + io::FileBlock::State::DOWNLOADED); + } + context.cache_type = io::FileCacheType::TTL; + int64_t cur_time = UnixSeconds(); + context.expiration_time = cur_time + 120; + for (int64_t offset = 45; offset < 90; offset += 5) { + auto holder = cache.get_or_set(key2, offset, 5, context); + auto segments = fromHolder(holder); + ASSERT_EQ(segments.size(), 1); + assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4), + io::FileBlock::State::EMPTY); + ASSERT_TRUE(segments[0]->get_or_set_downloader() == io::FileBlock::get_caller_id()); + download(segments[0]); + assert_range(1, segments[0], io::FileBlock::Range(offset, offset + 4), + io::FileBlock::State::DOWNLOADED); + } + std::cout << cache.reset_capacity(30) << std::endl; + while (cache._async_clear_file_cache) + ; + EXPECT_EQ(cache._cur_cache_size, 30); + if (fs::exists(cache_base_path)) { + fs::remove_all(cache_base_path); + } +} + } // namespace doris::io diff --git a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy index 716f623cfdbcea..8975d92f2ee893 100644 --- a/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/compaction/test_stale_rowset.groovy @@ -34,14 +34,14 @@ suite("test_stale_rowset") { } } String backendId = backendId_to_backendIP.keySet()[0] - def url = backendId_to_backendIP.get(backendId) + ":" + backendId_to_backendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendId_to_backendIP.get(backendId) + ":" + backendId_to_backendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy b/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy index ff729981c59b70..6407261b6fe722 100644 --- a/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy +++ b/regression-test/suites/cloud_p0/cache/http/test_clear_cache.groovy @@ -35,14 +35,13 @@ suite("test_clear_cache") { assertEquals(backendIdToBackendIP.size(), 1) backendId = 
backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" - url = url + "?sync=true" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info("clear file cache URL:" + url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" + op "get" body "" check check_func } diff --git a/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy b/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy index db2a7f4537e446..15f6f95b7761b8 100644 --- a/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy +++ b/regression-test/suites/cloud_p0/cache/http/test_clear_cache_async.groovy @@ -36,14 +36,13 @@ suite("test_clear_cache_async") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" - url = url + "?sync=false" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=false""" logger.info("clear file cache URL:" + url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" + op "get" body "" check check_func } diff --git a/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy new file mode 100644 index 00000000000000..ab27f40925df1a --- /dev/null +++ b/regression-test/suites/cloud_p0/cache/http/test_reset_capacity.groovy @@ -0,0 +1,297 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +import org.codehaus.groovy.runtime.IOGroovyMethods + +// 1. clear file cache +// 2. load 19.5G ttl table data into cache (cache capacity is 20G) +// 3. check ttl size and total size +// 4. load 1.3G normal table data into cache (just little datas will be cached) +// 5. select some data from normal table, and it will read from s3 +// 6. select some data from ttl table, and it will not read from s3 +// 7. wait for ttl data timeout +// 8. drop the normal table and load again. All normal table datas will be cached this time. +// 9. 
select some data from normal table to check whether all datas are cached +suite("test_reset_capacity") { + sql """ use @regression_cluster_name1 """ + sql """ set global enable_auto_analyze = false; """ + def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="4200") """ + //doris show backends: BackendId Host HeartbeatPort BePort HttpPort BrpcPort ArrowFlightSqlPort LastStartTime LastHeartbeat Alive SystemDecommissioned TabletNum DataUsedCapacity TrashUsedCapcacity AvailCapacity TotalCapacity UsedPct MaxDiskUsedPct RemoteUsedCapacity Tag ErrMsg Version Status HeartbeatFailureCounter NodeRole + def backends = sql_return_maparray "show backends;" + assertTrue(backends.size() > 0) + String backend_id; + def backendIdToBackendIP = [:] + def backendIdToBackendHttpPort = [:] + def backendIdToBackendBrpcPort = [:] + String host = '' + for (def backend in backends) { + if (backend.keySet().contains('Host')) { + host = backend.Host + } else { + host = backend.IP + } + def cloud_tag = parseJson(backend.Tag) + if (backend.Alive.equals("true") && cloud_tag.cloud_cluster_name.contains("regression_cluster_name1")) { + backendIdToBackendIP.put(backend.BackendId, host) + backendIdToBackendHttpPort.put(backend.BackendId, backend.HttpPort) + backendIdToBackendBrpcPort.put(backend.BackendId, backend.BrpcPort) + } + } + assertEquals(backendIdToBackendIP.size(), 1) + + backendId = backendIdToBackendIP.keySet()[0] + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" + logger.info(url) + def clearFileCache = { check_func -> + httpTest { + endpoint "" + uri url + op "get" + body "" + check check_func + } + } + + def resetUrl = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=reset&capacity=""" + def capacity = "0" + logger.info(resetUrl) + def resetFileCache = { check_func -> + httpTest { + endpoint "" + uri resetUrl + capacity + op "get" + body "" + check check_func + } + } + + def getMetricsMethod = { check_func -> + httpTest { + endpoint backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendBrpcPort.get(backendId) + uri "/brpc_metrics" + op "get" + check check_func + } + } + + def s3BucketName = getS3BucketName() + def s3WithProperties = """WITH S3 ( + |"AWS_ACCESS_KEY" = "${getS3AK()}", + |"AWS_SECRET_KEY" = "${getS3SK()}", + |"AWS_ENDPOINT" = "${getS3Endpoint()}", + |"AWS_REGION" = "${getS3Region()}") + |PROPERTIES( + |"exec_mem_limit" = "8589934592", + |"load_parallelism" = "3")""".stripMargin() + + + sql new File("""${context.file.parent}/../ddl/customer_ttl_delete.sql""").text + sql new File("""${context.file.parent}/../ddl/customer_delete.sql""").text + def load_customer_ttl_once = { String table -> + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + // def table = "customer" + // create table if not exists + sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties) + def loadLabel = table + "_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new 
IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + sleep(5000) + } + } + + def load_customer_once = { String table -> + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + // def table = "customer" + // create table if not exists + sql new File("""${context.file.parent}/../ddl/${table}.sql""").text + def loadLabel = table + "_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + sleep(5000) + } + } + + clearFileCache.call() { + respCode, body -> {} + } + + // one customer table would take about 1.3GB, the total cache size is 20GB + // the following would take 19.5G all + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + + // The max ttl cache size is 90% cache capacity + long ttl_cache_size = 0 + sleep(30000) + getMetricsMethod.call() { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag1 = false; + for (String line in strs) { + if (flag1) break; + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + ttl_cache_size = line.substring(i).toLong() + logger.info("current ttl_cache_size " + ttl_cache_size); + assertTrue(ttl_cache_size <= 19327352832) + flag1 = true + } + } + assertTrue(flag1) + } + capacity = "-1" + resetFileCache.call() { + respCode, body -> { + assertFalse("${respCode}".toString().equals("200")) + } + } + + capacity = "-faf13r1r" + resetFileCache.call() { + respCode, body -> { + assertFalse("${respCode}".toString().equals("200")) + } + } + + capacity = "0" + resetFileCache.call() { + respCode, body -> { + assertFalse("${respCode}".toString().equals("200")) + } + } + + capacity = "1073741824&path=/xxxxxx" // 1GB + resetFileCache.call() { + respCode, body -> { + assertEquals("${respCode}".toString(), "200") + } + } + + capacity = "1073741824" // 1GB + resetFileCache.call() { + respCode, body -> { + assertEquals("${respCode}".toString(), "200") + } + } + + sleep(60000) + getMetricsMethod.call() { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag1 = false; + for (String line in strs) { + if (flag1) break; + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + ttl_cache_size 
= line.substring(i).toLong() + logger.info("current ttl_cache_size " + ttl_cache_size); + assertTrue(ttl_cache_size <= 1073741824) + flag1 = true + } + } + assertTrue(flag1) + } + + capacity = "1099511627776" // 1TB + resetFileCache.call() { + respCode, body -> { + assertEquals("${respCode}".toString(), "200") + } + } + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + load_customer_ttl_once("customer_ttl") + + sleep(30000) + getMetricsMethod.call() { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag1 = false; + for (String line in strs) { + if (flag1) break; + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + ttl_cache_size = line.substring(i).toLong() + logger.info("current ttl_cache_size " + ttl_cache_size); + assertTrue(ttl_cache_size > 1073741824) + flag1 = true + } + } + assertTrue(flag1) + } + + capacity = "21474836480" // 20GB + resetFileCache.call() { + respCode, body -> { + assertEquals("${respCode}".toString(), "200") + } + } + + sql new File("""${context.file.parent}/../ddl/customer_delete.sql""").text +} \ No newline at end of file diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy index bfeb1c74c8fbf6..c7731134e4a845 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/sync_insert.groovy @@ -47,9 +47,9 @@ suite("sync_insert") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy index d2de945f3e39c0..7d03f4daf82cfd 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/read_write/test_multi_stale_rowset.groovy @@ -48,8 +48,8 @@ suite("test_multi_stale_rowset") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache?sync=true""" - op "post" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy index adc9697e15bbd6..3d22b75e98dfcf 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster.groovy @@ -76,8 +76,8 @@ suite("test_warm_up_cluster") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache?sync=true""" - op "post" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy 
b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy index 8841582ca6c1c0..f9a5004a84e370 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_batch.groovy @@ -75,9 +75,9 @@ suite("test_warm_up_cluster_batch") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy index 4c3aa6cbb86f2f..e9be62cf9821ee 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_bigsize.groovy @@ -76,9 +76,9 @@ suite("test_warm_up_cluster_bigsize") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy index 3af7956e9ecab3..bf3121b269f6e3 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_cluster_empty.groovy @@ -102,9 +102,9 @@ suite("test_warm_up_cluster_empty") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy index 912f10ce584746..0eb93f2896c39d 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_partition.groovy @@ -95,9 +95,9 @@ suite("test_warm_up_partition") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy index ea6fba9d724f58..ffce02f4f64ce2 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_table.groovy @@ -95,8 +95,8 @@ suite("test_warm_up_table") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + 
"""/api/clear_file_cache?sync=true""" - op "post" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" body "" } } diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy index c558513fb0d266..bf39e922802576 100644 --- a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/table/test_warm_up_tables.groovy @@ -118,9 +118,9 @@ suite("test_warm_up_tables") { def clearFileCache = { ip, port -> httpTest { endpoint "" - uri ip + ":" + port + """/api/clear_file_cache""" - op "post" - body "{\"sync\"=\"true\"}" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy index 05dfd0fe64f516..299608f4091ab5 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_1.groovy @@ -35,14 +35,14 @@ suite("alter_ttl_1") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy index 83e1e64178f5a9..660e822075d684 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_2.groovy @@ -35,14 +35,14 @@ suite("alter_ttl_2") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy index 65783d9a995d6a..bd88c5287f955d 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_3.groovy @@ -35,14 +35,14 @@ suite("alter_ttl_3") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git 
a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy index 11b7c0f42119ee..2731abaef0a7d2 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_4.groovy @@ -35,14 +35,14 @@ suite("alter_ttl_4") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy index 77a8bbdf78a2d3..19f946e299849a 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_max_int64.groovy @@ -35,14 +35,14 @@ suite("test_ttl_max_int64") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy index 4b8e7ebb5eecf0..7ad86c2cc53965 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_random.groovy @@ -35,14 +35,14 @@ suite("test_ttl_random") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy index 39092582a7b2e4..6d72e51c4c0c9f 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/alter_ttl_seconds.groovy @@ -35,14 +35,14 @@ suite("test_ttl_seconds") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - 
body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy index df4ccf886be179..47c458971bb944 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_as_select.groovy @@ -34,14 +34,14 @@ suite("create_table_as_select") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy index 320609f92a2373..9c927f5c025322 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/create_table_like.groovy @@ -34,14 +34,14 @@ suite("create_table_like") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy index d4101fa2840f7a..d9f928ebd89536 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl.groovy @@ -35,14 +35,14 @@ suite("test_ttl") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } } diff --git a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy index 0f8cc2f91e27cc..e8008a05e1334f 100644 --- a/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy +++ b/regression-test/suites/cloud_p0/cache/ttl/test_ttl_preempt.groovy @@ -35,14 +35,14 @@ suite("test_ttl_preempt") { assertEquals(backendIdToBackendIP.size(), 1) backendId = backendIdToBackendIP.keySet()[0] - def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/clear_file_cache""" + def url = backendIdToBackendIP.get(backendId) + ":" + backendIdToBackendHttpPort.get(backendId) + """/api/file_cache?op=clear&sync=true""" logger.info(url) 
def clearFileCache = { check_func -> httpTest { endpoint "" uri url - op "post" - body "{\"sync\"=\"true\"}" + op "get" + body "" check check_func } }
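
Note (not part of the patch): a minimal Groovy sketch of how the consolidated /api/file_cache endpoint added above could be exercised outside the regression framework, using only JDK classes. The beHost and beHttpPort values are placeholders; the query parameters (op=clear with sync, op=reset with capacity and optional path) are the ones handled in file_cache_action.cpp, and a cloud-mode BE registers this handler for HTTP GET.

// Illustrative only; adjust beHost/beHttpPort to the target backend.
def beHost = "127.0.0.1"          // placeholder BE host
def beHttpPort = "8040"           // placeholder BE webserver port

def callFileCacheApi = { String query ->
    // Plain HTTP GET against the consolidated file cache endpoint.
    def conn = new URL("http://${beHost}:${beHttpPort}/api/file_cache?${query}").openConnection()
    def code = conn.responseCode
    def body = (code == 200 ? conn.inputStream : conn.errorStream)?.text
    println "HTTP ${code}: ${body}"
    code
}

// Drop all cached blocks synchronously (replaces the removed POST /api/clear_file_cache).
callFileCacheApi("op=clear&sync=true")

// Shrink every cache instance to 1 GiB; append "&path=<base_path>" to target a single
// instance. Per the handler and the test above, capacity <= 0 yields a non-200 response.
callFileCacheApi("op=reset&capacity=1073741824")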