replace LOG_XXX with LOG_FMT_XXX in dbms/src/Dictionaries, dbms/src/Flash and dbms/src/Interpreters #4410

Merged: 9 commits, Mar 25, 2022
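The change is mechanical at every call site: stream-style messages built with operator<< chains (LOG_DEBUG, LOG_WARNING, ...) become format-string calls (LOG_FMT_DEBUG, LOG_FMT_WARNING, ...), and the __FUNCTION__ / __PRETTY_FUNCTION__ prefixes that some messages carried are dropped. The sketch below shows only the call-site shape; the two macros here are hypothetical, simplified stand-ins (no Poco::Logger * argument, no log-level check), whereas the real ones come from <common/logger_useful.h> and are presumably backed by the same {fmt} library already used by the old DiagnosticsService.cpp lines.

// Hypothetical, simplified stand-ins for the real macros, for illustration only.
#include <fmt/format.h>
#include <iostream>
#include <sstream>
#include <string>

// Old style: the caller assembles the message with an operator<< chain.
#define LOG_DEBUG(message_expr)                       \
    do                                                \
    {                                                 \
        std::ostringstream oss;                       \
        oss << message_expr;                          \
        std::cout << "[DEBUG] " << oss.str() << '\n'; \
    } while (false)

// New style: the caller passes a format string plus arguments
// (this toy version expects at least one format argument).
#define LOG_FMT_DEBUG(fmt_str, ...) \
    std::cout << "[DEBUG] " << fmt::format(fmt_str, __VA_ARGS__) << '\n'

int main()
{
    const std::string path = "/tmp/aggregation.part.0";
    // Before this PR:
    LOG_DEBUG("Writing part of aggregation data into temporary file " << path << ".");
    // After this PR:
    LOG_FMT_DEBUG("Writing part of aggregation data into temporary file {}.", path);
    return 0;
}

Keeping the whole message in one format string avoids a chain of stream temporaries and makes log templates easy to grep for.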
32 changes: 15 additions & 17 deletions dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp
@@ -12,16 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <Dictionaries/Embedded/RegionsHierarchy.h>
#include <Dictionaries/Embedded/GeodataProviders/IHierarchiesProvider.h>

#include <Poco/Util/Application.h>
#include <Dictionaries/Embedded/RegionsHierarchy.h>
#include <IO/WriteHelpers.h>
#include <Poco/Exception.h>

#include <Poco/Util/Application.h>
#include <common/logger_useful.h>
#include <ext/singleton.h>

#include <IO/WriteHelpers.h>
#include <ext/singleton.h>


RegionsHierarchy::RegionsHierarchy(IRegionsHierarchyDataSourcePtr data_source_)
@@ -50,13 +48,13 @@ void RegionsHierarchy::reload()
RegionParents new_continent(initial_size);
RegionParents new_top_continent(initial_size);
RegionPopulations new_populations(initial_size);
RegionDepths new_depths(initial_size);
RegionDepths new_depths(initial_size);
RegionTypes types(initial_size);

RegionID max_region_id = 0;


auto regions_reader = data_source->createReader();
auto regions_reader = data_source->createReader();

RegionEntry region_entry;
while (regions_reader->readNext(region_entry))
@@ -81,16 +79,16 @@ void RegionsHierarchy::reload()
types[region_entry.id] = region_entry.type;
}

new_parents .resize(max_region_id + 1);
new_city .resize(max_region_id + 1);
new_country .resize(max_region_id + 1);
new_area .resize(max_region_id + 1);
new_district .resize(max_region_id + 1);
new_continent .resize(max_region_id + 1);
new_parents.resize(max_region_id + 1);
new_city.resize(max_region_id + 1);
new_country.resize(max_region_id + 1);
new_area.resize(max_region_id + 1);
new_district.resize(max_region_id + 1);
new_continent.resize(max_region_id + 1);
new_top_continent.resize(max_region_id + 1);
new_populations .resize(max_region_id + 1);
new_depths .resize(max_region_id + 1);
types .resize(max_region_id + 1);
new_populations.resize(max_region_id + 1);
new_depths.resize(max_region_id + 1);
types.resize(max_region_id + 1);

/// prescribe the cities and countries for the regions
for (RegionID i = 0; i <= max_region_id; ++i)
4 changes: 2 additions & 2 deletions dbms/src/Dictionaries/Embedded/RegionsNames.cpp
@@ -33,7 +33,7 @@ RegionsNames::RegionsNames(IRegionsNamesDataProviderPtr data_provider)

std::string RegionsNames::dumpSupportedLanguagesNames()
{
std::string res = "";
std::string res;
for (size_t i = 0; i < LANGUAGE_ALIASES_COUNT; ++i)
{
if (i > 0)
@@ -60,7 +60,7 @@ void RegionsNames::reload()
if (!names_source->isModified())
continue;

LOG_DEBUG(log, "Reloading regions names for language: " << language);
LOG_FMT_DEBUG(log, "Reloading regions names for language: {}", language);

auto names_reader = names_source->createReader();

14 changes: 8 additions & 6 deletions dbms/src/Flash/BatchCommandsHandler.cpp
@@ -60,7 +60,7 @@ ThreadPool::Job BatchCommandsHandler::handleCommandJob(
SCOPE_EXIT({ GET_METRIC(tiflash_coprocessor_handling_request_count, type_batch_cop).Decrement(); });

const auto & cop_req = req.coprocessor();
auto cop_resp = resp.mutable_coprocessor();
auto * cop_resp = resp.mutable_coprocessor();

auto [context, status] = batch_commands_context.db_context_creation_func(&batch_commands_context.grpc_server_context);
if (!status.ok())
@@ -86,10 +86,10 @@ grpc::Status BatchCommandsHandler::execute()
/// Shortcut for only one request by not going to thread pool.
if (request.requests_size() == 1)
{
LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling the only batch command in place.");
LOG_FMT_DEBUG(log, "Handling the only batch command in place.");

const auto & req = request.requests(0);
auto resp = response.add_responses();
auto * resp = response.add_responses();
response.add_request_ids(request.request_ids(0));
auto ret = grpc::Status::OK;
handleCommandJob(req, *resp, ret)();
@@ -101,9 +101,11 @@ grpc::Status BatchCommandsHandler::execute()
size_t max_threads = settings.batch_commands_threads ? static_cast<size_t>(settings.batch_commands_threads)
: static_cast<size_t>(settings.max_threads);

LOG_DEBUG(
LOG_FMT_DEBUG(
log,
__PRETTY_FUNCTION__ << ": Handling " << request.requests_size() << " batch commands using " << max_threads << " threads.");
"Handling {} batch commands using {} threads.",
request.requests_size(),
max_threads);

ThreadPool thread_pool(max_threads);

@@ -113,7 +115,7 @@ grpc::Status BatchCommandsHandler::execute()

for (const auto & req : request.requests())
{
auto resp = response.add_responses();
auto * resp = response.add_responses();
response.add_request_ids(request.request_ids(i++));
rets.emplace_back(grpc::Status::OK);

12 changes: 6 additions & 6 deletions dbms/src/Flash/DiagnosticsService.cpp
@@ -81,7 +81,7 @@ std::list<std::string> getFilesToSearch(IServer & server, Poco::Logger * log, co
}
}

LOG_DEBUG(log, fmt::format("{}: got log directory {}", __FUNCTION__, log_dir));
LOG_FMT_DEBUG(log, "got log directory {}", log_dir);

if (log_dir.empty())
return files_to_search;
@@ -97,7 +97,7 @@ std::list<std::string> getFilesToSearch(IServer & server, Poco::Logger * log, co
}
}

LOG_DEBUG(log, fmt::format("{}: got log files to search {}", __FUNCTION__, files_to_search));
LOG_FMT_DEBUG(log, "got log files to search {}", files_to_search);

return files_to_search;
}
@@ -128,7 +128,7 @@ grpc::Status searchLog(Poco::Logger * log, ::grpc::ServerWriter<::diagnosticspb:

if (!stream->Write(resp))
{
LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Write response failed for unknown reason.");
LOG_FMT_DEBUG(log, "Write response failed for unknown reason.");
return grpc::Status(grpc::StatusCode::UNKNOWN, "Write response failed for unknown reason.");
}
}
@@ -158,16 +158,16 @@ ::grpc::Status DiagnosticsService::search_log(
patterns.push_back(pattern);
}

LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling SearchLog: " << request->DebugString());
LOG_FMT_DEBUG(log, "Handling SearchLog: {}", request->DebugString());
SCOPE_EXIT({
LOG_DEBUG(log, __PRETTY_FUNCTION__ << ": Handling SearchLog done: " << request->DebugString());
LOG_FMT_DEBUG(log, "Handling SearchLog done: {}", request->DebugString());
});

auto files_to_search = getFilesToSearch(server, log, start_time);

for (const auto & path : files_to_search)
{
LOG_DEBUG(log, fmt::format("{}: start to search file {}", __FUNCTION__, path));
LOG_FMT_DEBUG(log, "start to search file {}", path);
auto status = grpc::Status::OK;
ReadLogFile(path, [&](std::istream & istr) {
LogIterator log_itr(start_time, end_time, levels, patterns, istr);
2 changes: 1 addition & 1 deletion dbms/src/Flash/LogSearch.cpp
@@ -354,7 +354,7 @@ LogIterator::~LogIterator()
{
if (err_info.has_value())
{
LOG_DEBUG(log, "LogIterator search end with error " << std::to_string(err_info->second) << " at line " << std::to_string(err_info->first));
LOG_FMT_DEBUG(log, "LogIterator search end with error {} at line {}.", err_info->second, err_info->first);
}
}

2 changes: 1 addition & 1 deletion dbms/src/Flash/Mpp/ExchangeReceiver.cpp
@@ -464,7 +464,7 @@ void ExchangeReceiverBase<RPCContext>::readLoop(const Request & req)
status = reader->finish();
if (status.ok())
{
LOG_DEBUG(log, "finish read : " << req.debugString());
LOG_FMT_DEBUG(log, "finish read : {}", req.debugString());
break;
}
else
2 changes: 1 addition & 1 deletion dbms/src/Interpreters/Aggregator.cpp
@@ -644,7 +644,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co
CompressedWriteBuffer compressed_buf(file_buf);
NativeBlockOutputStream block_out(compressed_buf, ClickHouseRevision::get(), getHeader(false));

LOG_DEBUG(log, "Writing part of aggregation data into temporary file " << path << ".");
LOG_FMT_DEBUG(log, "Writing part of aggregation data into temporary file {}.", path);
ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart);

/// Flush only two-level data and possibly overflow data.
6 changes: 3 additions & 3 deletions dbms/src/Interpreters/ExternalLoader.cpp
@@ -250,7 +250,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
{
if (config_path.empty() || !config_repository->exists(config_path))
{
LOG_WARNING(log, "config file '" + config_path + "' does not exist");
LOG_FMT_WARNING(log, "config file '{}' does not exist", config_path);
}
else
{
@@ -284,7 +284,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
if (!startsWith(key, config_settings.external_config))
{
if (!startsWith(key, "comment") && !startsWith(key, "include_from"))
LOG_WARNING(log, config_path << ": unknown node in file: '" << key << "', expected '" << config_settings.external_config << "'");
LOG_FMT_WARNING(log, "{}: unknown node in file: '{}', expected '{}'", config_path, key, config_settings.external_config);
continue;
}

@@ -293,7 +293,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
name = config->getString(key + "." + config_settings.external_name);
if (name.empty())
{
LOG_WARNING(log, config_path << ": " + config_settings.external_name + " name cannot be empty");
LOG_FMT_WARNING(log, "{}: {} name cannot be empty", config_path, config_settings.external_name);
continue;
}

2 changes: 1 addition & 1 deletion dbms/src/Interpreters/ExternalLoader.h
@@ -183,7 +183,7 @@ class ExternalLoader

/// Check objects definitions in config files and reload or/and add new ones if the definition is changed
/// If loadable_name is not empty, load only loadable object with name loadable_name
void reloadFromConfigFiles(bool throw_on_error, bool force_reload = false, const std::string & loadable_name = "");
void reloadFromConfigFiles(bool throw_on_error, bool force_reload = false, const std::string & only_dictionary = "");
void reloadFromConfigFile(const std::string & config_path, bool throw_on_error, bool force_reload, const std::string & loadable_name);

/// Check config files and update expired loadable objects
3 changes: 1 addition & 2 deletions dbms/src/Interpreters/InterpreterSelectQuery.cpp
@@ -269,8 +269,7 @@ void InterpreterSelectQuery::getAndLockStorageWithSchemaVersion(const String & d
TableLockHolder lock;
Int64 storage_schema_version;
auto log_schema_version = [&](const String & result) {
LOG_DEBUG(log, __PRETTY_FUNCTION__ << " Table " << qualified_name << " schema " << result << " Schema version [storage, global, query]: "
<< "[" << storage_schema_version << ", " << global_schema_version << ", " << query_schema_version << "].");
LOG_FMT_DEBUG(log, "Table {} schema {} Schema version [storage, global, query]: [{}, {}, {}].", qualified_name, result, storage_schema_version, global_schema_version, query_schema_version);
};
bool ok;
{
15 changes: 8 additions & 7 deletions dbms/src/Interpreters/SharedQueries.h
@@ -103,9 +103,11 @@ class SharedQueries
{
if (clients != it->second->clients)
{
LOG_WARNING(log,
"Different client numbers between shared queries with same query_id(" //
<< query_id << "), former: " << it->second->clients << ", now: " << clients);
LOG_FMT_WARNING(log,
"Different client numbers between shared queries with same query_id({}), former: {} now: {}",
query_id,
it->second->clients,
clients);
}
auto & query = *(it->second);
if (query.connected_clients >= clients)
@@ -143,10 +145,9 @@ class SharedQueries
const auto it = queries.find(query_id);
if (it == queries.end())
{
LOG_WARNING(log,
"Shared query finished with query_id(" //
<< query_id << "), while resource cache not exists."
<< " Maybe this client takes too long before finish");
LOG_FMT_WARNING(log,
"Shared query finished with query_id({}), while resource cache not exists. Maybe this client takes too long before finish",
query_id);
return;
}
auto & query = *(it->second);