diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp index cca37981577..9156d7148eb 100644 --- a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -12,16 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include #include - -#include +#include +#include #include - +#include #include -#include -#include +#include RegionsHierarchy::RegionsHierarchy(IRegionsHierarchyDataSourcePtr data_source_) @@ -50,13 +48,13 @@ void RegionsHierarchy::reload() RegionParents new_continent(initial_size); RegionParents new_top_continent(initial_size); RegionPopulations new_populations(initial_size); - RegionDepths new_depths(initial_size); + RegionDepths new_depths(initial_size); RegionTypes types(initial_size); RegionID max_region_id = 0; - auto regions_reader = data_source->createReader(); + auto regions_reader = data_source->createReader(); RegionEntry region_entry; while (regions_reader->readNext(region_entry)) @@ -81,16 +79,16 @@ void RegionsHierarchy::reload() types[region_entry.id] = region_entry.type; } - new_parents .resize(max_region_id + 1); - new_city .resize(max_region_id + 1); - new_country .resize(max_region_id + 1); - new_area .resize(max_region_id + 1); - new_district .resize(max_region_id + 1); - new_continent .resize(max_region_id + 1); + new_parents.resize(max_region_id + 1); + new_city.resize(max_region_id + 1); + new_country.resize(max_region_id + 1); + new_area.resize(max_region_id + 1); + new_district.resize(max_region_id + 1); + new_continent.resize(max_region_id + 1); new_top_continent.resize(max_region_id + 1); - new_populations .resize(max_region_id + 1); - new_depths .resize(max_region_id + 1); - types .resize(max_region_id + 1); + new_populations.resize(max_region_id + 1); + new_depths.resize(max_region_id + 1); + types.resize(max_region_id + 1); /// prescribe the 
cities and countries for the regions for (RegionID i = 0; i <= max_region_id; ++i) diff --git a/dbms/src/Dictionaries/Embedded/RegionsNames.cpp b/dbms/src/Dictionaries/Embedded/RegionsNames.cpp index b61db2f1f82..503838ce92b 100644 --- a/dbms/src/Dictionaries/Embedded/RegionsNames.cpp +++ b/dbms/src/Dictionaries/Embedded/RegionsNames.cpp @@ -33,7 +33,7 @@ RegionsNames::RegionsNames(IRegionsNamesDataProviderPtr data_provider) std::string RegionsNames::dumpSupportedLanguagesNames() { - std::string res = ""; + std::string res; for (size_t i = 0; i < LANGUAGE_ALIASES_COUNT; ++i) { if (i > 0) @@ -60,7 +60,7 @@ void RegionsNames::reload() if (!names_source->isModified()) continue; - LOG_DEBUG(log, "Reloading regions names for language: " << language); + LOG_FMT_DEBUG(log, "Reloading regions names for language: {}", language); auto names_reader = names_source->createReader(); diff --git a/dbms/src/Flash/BatchCommandsHandler.cpp b/dbms/src/Flash/BatchCommandsHandler.cpp index 51e9500f53a..262165157c9 100644 --- a/dbms/src/Flash/BatchCommandsHandler.cpp +++ b/dbms/src/Flash/BatchCommandsHandler.cpp @@ -60,7 +60,7 @@ ThreadPool::Job BatchCommandsHandler::handleCommandJob( SCOPE_EXIT({ GET_METRIC(tiflash_coprocessor_handling_request_count, type_batch_cop).Decrement(); }); const auto & cop_req = req.coprocessor(); - auto cop_resp = resp.mutable_coprocessor(); + auto * cop_resp = resp.mutable_coprocessor(); auto [context, status] = batch_commands_context.db_context_creation_func(&batch_commands_context.grpc_server_context); if (!status.ok()) @@ -86,10 +86,10 @@ grpc::Status BatchCommandsHandler::execute() /// Shortcut for only one request by not going to thread pool. 
if (request.requests_size() == 1) { - LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling the only batch command in place."); + LOG_FMT_DEBUG(log, "Handling the only batch command in place."); const auto & req = request.requests(0); - auto resp = response.add_responses(); + auto * resp = response.add_responses(); response.add_request_ids(request.request_ids(0)); auto ret = grpc::Status::OK; handleCommandJob(req, *resp, ret)(); @@ -101,9 +101,11 @@ grpc::Status BatchCommandsHandler::execute() size_t max_threads = settings.batch_commands_threads ? static_cast(settings.batch_commands_threads) : static_cast(settings.max_threads); - LOG_DEBUG( + LOG_FMT_DEBUG( log, - __PRETTY_FUNCTION__ << ": Handling " << request.requests_size() << " batch commands using " << max_threads << " threads."); + "Handling {} batch commands using {} threads.", + request.requests_size(), + max_threads); ThreadPool thread_pool(max_threads); @@ -113,7 +115,7 @@ grpc::Status BatchCommandsHandler::execute() for (const auto & req : request.requests()) { - auto resp = response.add_responses(); + auto * resp = response.add_responses(); response.add_request_ids(request.request_ids(i++)); rets.emplace_back(grpc::Status::OK); diff --git a/dbms/src/Flash/DiagnosticsService.cpp b/dbms/src/Flash/DiagnosticsService.cpp index 7092704f49e..937f2794fa8 100644 --- a/dbms/src/Flash/DiagnosticsService.cpp +++ b/dbms/src/Flash/DiagnosticsService.cpp @@ -81,7 +81,7 @@ std::list getFilesToSearch(IServer & server, Poco::Logger * log, co } } - LOG_DEBUG(log, fmt::format("{}: got log directory {}", __FUNCTION__, log_dir)); + LOG_FMT_DEBUG(log, "got log directory {}", log_dir); if (log_dir.empty()) return files_to_search; @@ -97,7 +97,7 @@ std::list getFilesToSearch(IServer & server, Poco::Logger * log, co } } - LOG_DEBUG(log, fmt::format("{}: got log files to search {}", __FUNCTION__, files_to_search)); + LOG_FMT_DEBUG(log, "got log files to search {}", files_to_search); return files_to_search; } @@ -128,7 +128,7 @@ 
grpc::Status searchLog(Poco::Logger * log, ::grpc::ServerWriter<::diagnosticspb: if (!stream->Write(resp)) { - LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Write response failed for unknown reason."); + LOG_FMT_DEBUG(log, "Write response failed for unknown reason."); return grpc::Status(grpc::StatusCode::UNKNOWN, "Write response failed for unknown reason."); } } @@ -158,16 +158,16 @@ ::grpc::Status DiagnosticsService::search_log( patterns.push_back(pattern); } - LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling SearchLog: " << request->DebugString()); + LOG_FMT_DEBUG(log, "Handling SearchLog: {}", request->DebugString()); SCOPE_EXIT({ - LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling SearchLog done: " << request->DebugString()); + LOG_FMT_DEBUG(log, "Handling SearchLog done: {}", request->DebugString()); }); auto files_to_search = getFilesToSearch(server, log, start_time); for (const auto & path : files_to_search) { - LOG_DEBUG(log, fmt::format("{}: start to search file {}", __FUNCTION__, path)); + LOG_FMT_DEBUG(log, "start to search file {}", path); auto status = grpc::Status::OK; ReadLogFile(path, [&](std::istream & istr) { LogIterator log_itr(start_time, end_time, levels, patterns, istr); diff --git a/dbms/src/Flash/LogSearch.cpp b/dbms/src/Flash/LogSearch.cpp index 52e650543b3..43f99973720 100644 --- a/dbms/src/Flash/LogSearch.cpp +++ b/dbms/src/Flash/LogSearch.cpp @@ -354,7 +354,7 @@ LogIterator::~LogIterator() { if (err_info.has_value()) { - LOG_DEBUG(log, "LogIterator search end with error " << std::to_string(err_info->second) << " at line " << std::to_string(err_info->first)); + LOG_FMT_DEBUG(log, "LogIterator search end with error {} at line {}.", err_info->second, err_info->first); } } diff --git a/dbms/src/Flash/Mpp/ExchangeReceiver.cpp b/dbms/src/Flash/Mpp/ExchangeReceiver.cpp index 802315b9676..d22d978b1d7 100644 --- a/dbms/src/Flash/Mpp/ExchangeReceiver.cpp +++ b/dbms/src/Flash/Mpp/ExchangeReceiver.cpp @@ -464,7 +464,7 @@ void 
ExchangeReceiverBase::readLoop(const Request & req) status = reader->finish(); if (status.ok()) { - LOG_DEBUG(log, "finish read : " << req.debugString()); + LOG_FMT_DEBUG(log, "finish read : {}", req.debugString()); break; } else diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index 6e0f7c2e5ee..dd69b8c1419 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -644,7 +644,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co CompressedWriteBuffer compressed_buf(file_buf); NativeBlockOutputStream block_out(compressed_buf, ClickHouseRevision::get(), getHeader(false)); - LOG_DEBUG(log, "Writing part of aggregation data into temporary file " << path << "."); + LOG_FMT_DEBUG(log, "Writing part of aggregation data into temporary file {}.", path); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); /// Flush only two-level data and possibly overflow data. diff --git a/dbms/src/Interpreters/ExternalLoader.cpp b/dbms/src/Interpreters/ExternalLoader.cpp index 7af354ff363..5e52e04d907 100644 --- a/dbms/src/Interpreters/ExternalLoader.cpp +++ b/dbms/src/Interpreters/ExternalLoader.cpp @@ -250,7 +250,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const { if (config_path.empty() || !config_repository->exists(config_path)) { - LOG_WARNING(log, "config file '" + config_path + "' does not exist"); + LOG_FMT_WARNING(log, "config file '{}' does not exist", config_path); } else { @@ -284,7 +284,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const if (!startsWith(key, config_settings.external_config)) { if (!startsWith(key, "comment") && !startsWith(key, "include_from")) - LOG_WARNING(log, config_path << ": unknown node in file: '" << key << "', expected '" << config_settings.external_config << "'"); + LOG_FMT_WARNING(log, "{}: unknown node in file: '{}', expected '{}'", config_path, key, 
config_settings.external_config); continue; } @@ -293,7 +293,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const name = config->getString(key + "." + config_settings.external_name); if (name.empty()) { - LOG_WARNING(log, config_path << ": " + config_settings.external_name + " name cannot be empty"); + LOG_FMT_WARNING(log, "{}: {} name cannot be empty", config_path, config_settings.external_name); continue; } diff --git a/dbms/src/Interpreters/ExternalLoader.h b/dbms/src/Interpreters/ExternalLoader.h index bee2657ecf1..26751f7c6b9 100644 --- a/dbms/src/Interpreters/ExternalLoader.h +++ b/dbms/src/Interpreters/ExternalLoader.h @@ -183,7 +183,7 @@ class ExternalLoader /// Check objects definitions in config files and reload or/and add new ones if the definition is changed /// If loadable_name is not empty, load only loadable object with name loadable_name - void reloadFromConfigFiles(bool throw_on_error, bool force_reload = false, const std::string & loadable_name = ""); + void reloadFromConfigFiles(bool throw_on_error, bool force_reload = false, const std::string & only_dictionary = ""); void reloadFromConfigFile(const std::string & config_path, bool throw_on_error, bool force_reload, const std::string & loadable_name); /// Check config files and update expired loadable objects diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index ada5c172b62..0da7649a270 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -269,8 +269,7 @@ void InterpreterSelectQuery::getAndLockStorageWithSchemaVersion(const String & d TableLockHolder lock; Int64 storage_schema_version; auto log_schema_version = [&](const String & result) { - LOG_DEBUG(log, __PRETTY_FUNCTION__ << " Table " << qualified_name << " schema " << result << " Schema version [storage, global, query]: " - << "[" << storage_schema_version << ", " << 
global_schema_version << ", " << query_schema_version << "]."); + LOG_FMT_DEBUG(log, "Table {} schema {} Schema version [storage, global, query]: [{}, {}, {}].", qualified_name, result, storage_schema_version, global_schema_version, query_schema_version); }; bool ok; { diff --git a/dbms/src/Interpreters/SharedQueries.h b/dbms/src/Interpreters/SharedQueries.h index 5a69da8677c..36cc28932bc 100644 --- a/dbms/src/Interpreters/SharedQueries.h +++ b/dbms/src/Interpreters/SharedQueries.h @@ -103,9 +103,11 @@ class SharedQueries { if (clients != it->second->clients) { - LOG_WARNING(log, "Different client numbers between shared queries with same query_id(" // << query_id << "), former: " << it->second->clients << ", now: " << clients); + LOG_FMT_WARNING(log, "Different client numbers between shared queries with same query_id({}), former: {}, now: {}", query_id, it->second->clients, clients); } auto & query = *(it->second); if (query.connected_clients >= clients) @@ -143,10 +145,9 @@ class SharedQueries const auto it = queries.find(query_id); if (it == queries.end()) { - LOG_WARNING(log, "Shared query finished with query_id(" // << query_id << "), while resource cache not exists." << " Maybe this client takes too long before finish", + LOG_FMT_WARNING(log, "Shared query finished with query_id({}), while resource cache not exists. Maybe this client takes too long before finish", query_id); return; } auto & query = *(it->second);