
Commit

Merge branch 'master' into refine_join_probe
SeaRise authored Apr 10, 2023
2 parents 7fb7694 + 1237e95 commit c309323
Showing 72 changed files with 1,650 additions and 3,310 deletions.
2 changes: 1 addition & 1 deletion contrib/kvproto
5 changes: 3 additions & 2 deletions dbms/src/Client/Connection.h
@@ -125,7 +125,8 @@ class Connection : private boost::noncopyable
setDescription();
}

virtual ~Connection(){};
virtual ~Connection() = default;
;

/// Set throttler of network traffic. One throttler could be used for multiple connections to limit total traffic.
void setThrottler(const ThrottlerPtr & throttler_)
@@ -268,7 +269,7 @@ class Connection : private boost::noncopyable
class LoggerWrapper
{
public:
LoggerWrapper(Connection & parent_)
explicit LoggerWrapper(Connection & parent_)
: log(nullptr)
, parent(parent_)
{
8 changes: 8 additions & 0 deletions dbms/src/Columns/ColumnUtils.cpp
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <Columns/ColumnNullable.h>
#include <Columns/ColumnUtils.h>
#include <DataTypes/DataTypeNullable.h>

namespace DB
{
@@ -30,4 +32,10 @@ bool columnEqual(const ColumnPtr & expected, const ColumnPtr & actual, String &
}
return true;
}
void convertColumnToNullable(ColumnWithTypeAndName & column)
{
column.type = makeNullable(column.type);
if (column.column)
column.column = makeNullable(column.column);
}
} // namespace DB
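
The new convertColumnToNullable helper wraps both the data type and the column itself in a Nullable wrapper, so callers no longer need to perform the two steps by hand. A minimal sketch of a hypothetical caller (the makeBlockNullable wrapper below is illustrative, not part of this commit) that converts every column of a block to Nullable, e.g. before padding non-matched rows of an outer join with NULLs:

#include <Columns/ColumnUtils.h>
#include <Core/Block.h>

namespace DB
{
// Illustrative helper: make every column of a block Nullable using the
// convertColumnToNullable function added in this commit.
void makeBlockNullable(Block & block)
{
    for (size_t i = 0; i < block.columns(); ++i)
        convertColumnToNullable(block.getByPosition(i));
}
} // namespace DB
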
2 changes: 2 additions & 0 deletions dbms/src/Columns/ColumnUtils.h
@@ -15,8 +15,10 @@
#pragma once

#include <Columns/IColumn.h>
#include <Core/ColumnWithTypeAndName.h>

namespace DB
{
bool columnEqual(const ColumnPtr & expected, const ColumnPtr & actual, String & unequal_msg);
void convertColumnToNullable(ColumnWithTypeAndName & column);
} // namespace DB
3 changes: 1 addition & 2 deletions dbms/src/Common/ExternalTable.h
@@ -15,7 +15,6 @@
#pragma once

#include <Client/Connection.h>
#include <Common/HTMLForm.h>
#include <DataStreams/AsynchronousBlockInputStream.h>
#include <DataTypes/DataTypeFactory.h>
#include <IO/ReadBufferFromFile.h>
@@ -195,7 +194,7 @@ class ExternalTablesHandler : public Poco::Net::PartHandler
, params(params_)
{}

void handlePart(const Poco::Net::MessageHeader & header, std::istream & stream)
void handlePart(const Poco::Net::MessageHeader & header, std::istream & stream) override
{
/// The buffer is initialized here, not in the virtual function initReadBuffer
read_buffer = std::make_unique<ReadBufferFromIStream>(stream);
56 changes: 0 additions & 56 deletions dbms/src/Common/HTMLForm.h

This file was deleted.

33 changes: 26 additions & 7 deletions dbms/src/Common/TiFlashMetrics.h
@@ -218,8 +218,8 @@ namespace DB
F(type_lock_conflict, {"type", "lock_conflict"}), F(type_delete_conflict, {"type", "delete_conflict"}), \
F(type_delete_risk, {"type", "delete_risk"})) \
M(tiflash_disaggregated_object_lock_request_duration_seconds, "Bucketed histogram of S3 object lock/delete request duration", Histogram, \
F(type_lock, {{"type", "cop"}}, ExpBuckets{0.001, 2, 20}), \
F(type_delete, {{"type", "batch"}}, ExpBuckets{0.001, 2, 20})) \
F(type_lock, {{"type", "lock"}}, ExpBuckets{0.001, 2, 20}), \
F(type_delete, {{"type", "delete"}}, ExpBuckets{0.001, 2, 20})) \
M(tiflash_disaggregated_read_tasks_count, "Total number of storage engine disaggregated read tasks", Counter) \
M(tiflash_disaggregated_breakdown_duration_seconds, "", Histogram, \
F(type_rpc_establish, {{"type", "rpc_establish"}}, ExpBuckets{0.01, 2, 20}), \
@@ -319,8 +319,26 @@ namespace DB
M(tiflash_storage_remote_stats, "The file stats on remote store", Gauge, \
F(type_total_size, {"type", "total_size"}), F(type_valid_size, {"type", "valid_size"}), \
F(type_num_files, {"type", "num_files"})) \
M(tiflash_storage_checkpoint_seconds, "PageStorage checkpoint elapsed time", \
Histogram, /* these command usually cost several seconds, increase the start bucket to 50ms */ \
F(type_dump_checkpoint_snapshot, {{"type", "dump_checkpoint_snapshot"}}, ExpBuckets{0.05, 2, 20}), \
F(type_dump_checkpoint_data, {{"type", "dump_checkpoint_data"}}, ExpBuckets{0.05, 2, 20}), \
F(type_upload_checkpoint, {{"type", "upload_checkpoint"}}, ExpBuckets{0.05, 2, 20}), \
F(type_copy_checkpoint_info, {{"type", "copy_checkpoint_info"}}, ExpBuckets{0.05, 2, 20})) \
M(tiflash_storage_checkpoint_flow, "The bytes flow cause by remote checkpoint", Counter, \
F(type_incremental, {"type", "incremental"}), F(type_compaction, {"type", "compaction"})) \
M(tiflash_storage_checkpoint_keys_by_types, "The keys flow cause by remote checkpoint", Counter, \
F(type_raftengine, {"type", "raftengine"}), F(type_kvengine, {"type", "kvengine"}), F(type_kvstore, {"type", "kvstore"}), \
F(type_data, {"type", "data"}), F(type_log, {"type", "log"}), F(type_meta, {"type", "kvstore"}), \
F(type_unknown, {"type", "unknown"})) \
M(tiflash_storage_checkpoint_flow_by_types, "The bytes flow cause by remote checkpoint", Counter, \
F(type_raftengine, {"type", "raftengine"}), F(type_kvengine, {"type", "kvengine"}), F(type_kvstore, {"type", "kvstore"}), \
F(type_data, {"type", "data"}), F(type_log, {"type", "log"}), F(type_meta, {"type", "kvstore"}), \
F(type_unknown, {"type", "unknown"})) \
M(tiflash_storage_page_data_by_types, "The existing bytes stored in UniPageStorage", Gauge, \
F(type_raftengine, {"type", "raftengine"}), F(type_kvengine, {"type", "kvengine"}), F(type_kvstore, {"type", "kvstore"}), \
F(type_data, {"type", "data"}), F(type_log, {"type", "log"}), F(type_meta, {"type", "kvstore"}), \
F(type_unknown, {"type", "unknown"})) \
M(tiflash_storage_s3_request_seconds, "S3 request duration in seconds", Histogram, \
F(type_put_object, {{"type", "put_object"}}, ExpBuckets{0.001, 2, 20}), \
F(type_copy_object, {{"type", "copy_object"}}, ExpBuckets{0.001, 2, 20}), \
@@ -331,6 +349,11 @@ namespace DB
F(type_list_objects, {{"type", "list_objects"}}, ExpBuckets{0.001, 2, 20}), \
F(type_delete_object, {{"type", "delete_object"}}, ExpBuckets{0.001, 2, 20}), \
F(type_head_object, {{"type", "head_object"}}, ExpBuckets{0.001, 2, 20})) \
M(tiflash_storage_s3_gc_status, "S3 GC status", Gauge, \
F(type_lifecycle_added, {{"type", "lifecycle_added"}}), \
F(type_lifecycle_failed, {{"type", "lifecycle_failed"}}), \
F(type_owner, {{"type", "owner"}}), \
F(type_running, {{"type", "running"}})) \
M(tiflash_storage_s3_gc_seconds, "S3 GC subprocess duration in seconds", \
Histogram, /* these command usually cost several seconds, increase the start bucket to 500ms */ \
F(type_total, {{"type", "total"}}, ExpBuckets{0.5, 2, 20}), \
@@ -339,11 +362,7 @@ namespace DB
F(type_clean_locks, {{"type", "clean_locks"}}, ExpBuckets{0.5, 2, 20}), \
F(type_clean_manifests, {{"type", "clean_manifests"}}, ExpBuckets{0.5, 2, 20}), \
F(type_scan_then_clean_data_files, {{"type", "scan_then_clean_data_files"}}, ExpBuckets{0.5, 2, 20}), \
F(type_clean_one_lock, {{"type", "clean_one_lock"}}, ExpBuckets{0.5, 2, 20})) \
M(tiflash_storage_checkpoint_seconds, "PageStorage checkpoint elapsed time", Histogram, \
F(type_dump_checkpoint_snapshot, {{"type", "dump_checkpoint_snapshot"}}, ExpBuckets{0.001, 2, 20}), \
F(type_dump_checkpoint_data, {{"type", "dump_checkpoint_data"}}, ExpBuckets{0.001, 2, 20}), \
F(type_upload_checkpoint, {{"type", "upload_checkpoint"}}, ExpBuckets{0.001, 2, 20}))
F(type_clean_one_lock, {{"type", "clean_one_lock"}}, ExpBuckets{0.5, 2, 20}))

// clang-format on

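
The tiflash_storage_checkpoint_seconds histogram is moved next to the other storage histograms and its buckets now start at 50 ms instead of 1 ms, matching the inline comment that checkpoint commands usually take seconds. ExpBuckets{start, base, size} declares exponentially growing bucket bounds; below is a minimal stand-alone sketch of that expansion (not TiFlash's actual ExpBuckets implementation, whose member names may differ):

#include <cstddef>
#include <vector>

// Expand an exponential bucket spec into histogram upper bounds,
// e.g. {0.05, 2, 20} -> 0.05, 0.1, 0.2, ... (20 bounds in total).
std::vector<double> expBuckets(double start, double base, std::size_t size)
{
    std::vector<double> bounds;
    bounds.reserve(size);
    double value = start;
    for (std::size_t i = 0; i < size; ++i, value *= base)
        bounds.push_back(value);
    return bounds;
}
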
8 changes: 1 addition & 7 deletions dbms/src/Core/Defines.h
@@ -17,14 +17,13 @@
#include <common/defines.h>
#include <common/types.h>

#define DBMS_NAME "ClickHouse"
#define DBMS_NAME "TiFlash"
#define DBMS_VERSION_MAJOR 1
#define DBMS_VERSION_MINOR 1

#define DBMS_DEFAULT_HOST "localhost"
#define DBMS_DEFAULT_PORT 9000
#define DBMS_DEFAULT_SECURE_PORT 9440
#define DBMS_DEFAULT_HTTP_PORT 8123
#define DBMS_DEFAULT_CONNECT_TIMEOUT_SEC 10
#define DBMS_DEFAULT_CONNECT_TIMEOUT_WITH_FAILOVER_MS 50
#define DBMS_DEFAULT_SEND_TIMEOUT_SEC 300
@@ -104,17 +103,12 @@ static constexpr UInt64 DEFAULT_DISAGG_TASK_TIMEOUT_SEC = 5 * 60;

#define DEFAULT_QUERY_LOG_FLUSH_INTERVAL_MILLISECONDS 7500

#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800
#define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1

#define PLATFORM_NOT_SUPPORTED "The only supported platforms are x86_64 and AArch64 (work in progress)"

#define DEFAULT_MARK_CACHE_SIZE (1ULL * 1024 * 1024 * 1024)

#define DEFAULT_METRICS_PORT 8234

#define DEFAULT_HTTP_PORT 8123

#if !defined(__x86_64__) && !defined(__aarch64__)
// #error PLATFORM_NOT_SUPPORTED
#endif
17 changes: 9 additions & 8 deletions dbms/src/DataStreams/NonJoinedBlockInputStream.cpp
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include <Columns/ColumnUtils.h>
#include <DataStreams/NonJoinedBlockInputStream.h>
#include <DataStreams/materializeBlock.h>

@@ -200,12 +201,12 @@ void NonJoinedBlockInputStream::fillColumnsUsingCurrentPartition(
}
if (parent.strictness == ASTTableJoin::Strictness::Any)
{
switch (parent.type)
switch (parent.join_map_method)
{
#define M(TYPE) \
case JoinType::TYPE: \
#define M(METHOD) \
case JoinMapMethod::METHOD: \
fillColumns<ASTTableJoin::Strictness::Any>( \
*partition->maps_any_full.TYPE, \
*partition->maps_any_full.METHOD, \
num_columns_left, \
mutable_columns_left, \
num_columns_right, \
@@ -221,12 +222,12 @@ void NonJoinedBlockInputStream::fillColumnsUsingCurrentPartition(
}
else if (parent.strictness == ASTTableJoin::Strictness::All)
{
switch (parent.type)
switch (parent.join_map_method)
{
#define M(TYPE) \
case JoinType::TYPE: \
#define M(METHOD) \
case JoinMapMethod::METHOD: \
fillColumns<ASTTableJoin::Strictness::All>( \
*partition->maps_all_full.TYPE, \
*partition->maps_all_full.METHOD, \
num_columns_left, \
mutable_columns_left, \
num_columns_right, \
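
This hunk follows the rename of the join hash-map dispatch: the stream now switches on parent.join_map_method (a JoinMapMethod enum) instead of parent.type, and the local M macro instantiates one case per map layout. A self-contained sketch of that X-macro dispatch pattern (the APPLY_FOR_JOIN_MAP_METHODS list and method names below are illustrative, not TiFlash's real enumeration):

#include <cstdio>

#define APPLY_FOR_JOIN_MAP_METHODS(M) \
    M(key8)                           \
    M(key16)                          \
    M(keys128)

// One enumerator per hash-map layout, generated from the list above.
enum class JoinMapMethod
{
#define M(METHOD) METHOD,
    APPLY_FOR_JOIN_MAP_METHODS(M)
#undef M
};

// Generate one switch case per layout, mirroring the dispatch in the hunk.
void dispatch(JoinMapMethod method)
{
    switch (method)
    {
#define M(METHOD)                                          \
    case JoinMapMethod::METHOD:                            \
        std::printf("fill columns via %s map\n", #METHOD); \
        break;
        APPLY_FOR_JOIN_MAP_METHODS(M)
#undef M
    }
}
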
10 changes: 0 additions & 10 deletions dbms/src/Dictionaries/DictionarySourceFactory.cpp
@@ -23,7 +23,6 @@
#include <Dictionaries/DictionaryStructure.h>
#include <Dictionaries/ExecutableDictionarySource.h>
#include <Dictionaries/FileDictionarySource.h>
#include <Dictionaries/HTTPDictionarySource.h>
#include <Dictionaries/LibraryDictionarySource.h>
#include <IO/HTTPCommon.h>
#include <Poco/Logger.h>
@@ -143,15 +142,6 @@ DictionarySourcePtr DictionarySourceFactory::create(

return std::make_unique<ExecutableDictionarySource>(dict_struct, config, config_prefix + ".executable", sample_block, context);
}
else if ("http" == source_type)
{
if (dict_struct.has_expressions)
throw Exception{
"Dictionary source of type `http` does not support attribute expressions",
ErrorCodes::LOGICAL_ERROR};

return std::make_unique<HTTPDictionarySource>(dict_struct, config, config_prefix + ".http", sample_block, context);
}
else if ("library" == source_type)
{
return std::make_unique<LibraryDictionarySource>(dict_struct, config, config_prefix + ".library", sample_block, context);
