From 006ff423f2ba463c90e6baf3cf8eed4a58adeb7a Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Fri, 12 May 2023 22:04:26 +0800 Subject: [PATCH 1/7] fix Signed-off-by: Lloyd-Pottiger --- dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp | 4 ++-- .../mpp/late_materialization_generate_column.test | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp index f75878ceee0..b02fdc3266e 100644 --- a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp @@ -432,10 +432,10 @@ void DAGStorageInterpreter::executeImpl(DAGPipeline & pipeline) FAIL_POINT_PAUSE(FailPoints::pause_after_copr_streams_acquired); FAIL_POINT_PAUSE(FailPoints::pause_after_copr_streams_acquired_once); - /// handle timezone/duration cast for local and remote table scan. - executeCastAfterTableScan(remote_read_streams_start_index, pipeline); /// handle generated column if necessary. executeGeneratedColumnPlaceholder(remote_read_streams_start_index, generated_column_infos, log, pipeline); + /// handle timezone/duration cast for local and remote table scan. + executeCastAfterTableScan(remote_read_streams_start_index, pipeline); recordProfileStreams(pipeline, table_scan.getTableScanExecutorID()); /// handle filter conditions for local and remote table scan. diff --git a/tests/fullstack-test/mpp/late_materialization_generate_column.test b/tests/fullstack-test/mpp/late_materialization_generate_column.test index a3021e70944..ffc382c1cf7 100644 --- a/tests/fullstack-test/mpp/late_materialization_generate_column.test +++ b/tests/fullstack-test/mpp/late_materialization_generate_column.test @@ -13,7 +13,7 @@ # limitations under the License. 
-mysql> CREATE TABLE test.`IDT_26539` (`COL102` float DEFAULT NULL, `COL103` float DEFAULT NULL, `COL1` float GENERATED ALWAYS AS ((`COL102` DIV 10)) VIRTUAL, `COL2` varchar(20) COLLATE utf8mb4_bin DEFAULT NULL, `COL4` datetime DEFAULT NULL, `COL3` bigint DEFAULT NULL, `COL5` float DEFAULT NULL, KEY `UK_COL1` (`COL1`) /*!80000 INVISIBLE */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +mysql> CREATE TABLE test.`IDT_26539` (`COL102` float DEFAULT NULL, `COL103` float DEFAULT NULL, `COL1` float GENERATED ALWAYS AS ((`COL102` DIV 10)) VIRTUAL, `COL2` varchar(20) COLLATE utf8mb4_bin DEFAULT NULL, `COL4` datetime DEFAULT NULL, `COL3` bigint DEFAULT NULL, `COL5` time DEFAULT NULL, KEY `UK_COL1` (`COL1`) /*!80000 INVISIBLE */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; mysql> insert into test.IDT_26539 (COL102, COL103, COL2, COL4, COL3, COL5) values (NULL, NULL, NULL, NULL, NULL, NULL); mysql> insert into test.IDT_26539 (COL102, COL103, COL2, COL4, COL3, COL5) select COL102, COL103, COL2, COL4, COL3, COL5 from test.IDT_26539; mysql> insert into test.IDT_26539 (COL102, COL103, COL2, COL4, COL3, COL5) select COL102, COL103, COL2, COL4, COL3, COL5 from test.IDT_26539; From 698f46ca01e405de8a2972367f4feb2f85728365 Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Fri, 12 May 2023 22:06:55 +0800 Subject: [PATCH 2/7] fix for pipeline Signed-off-by: Lloyd-Pottiger --- dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp index b02fdc3266e..e4a789e7aed 100644 --- a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp @@ -352,11 +352,12 @@ SourceOps DAGStorageInterpreter::executeImpl(PipelineExecutorStatus & exec_statu void DAGStorageInterpreter::executeSuffix(PipelineExecutorStatus & exec_status, PipelineExecGroupBuilder & group_builder) { + /// handle generated column if necessary. + executeGeneratedColumnPlaceholder(exec_status, group_builder, remote_read_sources_start_index, generated_column_infos, log); + /// handle timezone/duration cast for local table scan. executeCastAfterTableScan(exec_status, group_builder, remote_read_sources_start_index); - executeGeneratedColumnPlaceholder(exec_status, group_builder, remote_read_sources_start_index, generated_column_infos, log); - /// handle filter conditions for local and remote table scan. 
if (filter_conditions.hasValue()) { From ac7770bc1046292878f0cafd7a047cb26c62955b Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Mon, 15 May 2023 13:29:19 +0800 Subject: [PATCH 3/7] fix 7455 Signed-off-by: Lloyd-Pottiger --- .../Coprocessor/DAGStorageInterpreter.cpp | 49 +++++++++++-------- .../Flash/Coprocessor/DAGStorageInterpreter.h | 3 +- .../Storages/DeltaMerge/DeltaMergeStore.cpp | 2 +- .../DeltaMerge/Filter/PushDownFilter.h | 9 ++-- dbms/src/Storages/StorageDeltaMerge.cpp | 17 ++++++- .../duration_filter_late_materialization.test | 47 ++++++++++++++++++ 6 files changed, 99 insertions(+), 28 deletions(-) create mode 100644 tests/fullstack-test/expr/duration_filter_late_materialization.test diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp index e4a789e7aed..21735024745 100644 --- a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp @@ -354,7 +354,16 @@ void DAGStorageInterpreter::executeSuffix(PipelineExecutorStatus & exec_status, { /// handle generated column if necessary. executeGeneratedColumnPlaceholder(exec_status, group_builder, remote_read_sources_start_index, generated_column_infos, log); - + NamesAndTypes source_columns; + source_columns.reserve(table_scan.getColumnSize()); + const auto table_scan_output_header = group_builder.getCurrentHeader(); + for (const auto & col : table_scan_output_header) + source_columns.emplace_back(col.name, col.type); + analyzer = std::make_unique(std::move(source_columns), context); + /// If there is no local source, there is no need to execute cast and push down filter, return directly. + /// But we should make sure that the analyzer is initialized before return. + if (remote_read_sources_start_index == 0) + return; /// handle timezone/duration cast for local table scan. executeCastAfterTableScan(exec_status, group_builder, remote_read_sources_start_index); @@ -432,9 +441,18 @@ void DAGStorageInterpreter::executeImpl(DAGPipeline & pipeline) FAIL_POINT_PAUSE(FailPoints::pause_after_copr_streams_acquired); FAIL_POINT_PAUSE(FailPoints::pause_after_copr_streams_acquired_once); - /// handle generated column if necessary. executeGeneratedColumnPlaceholder(remote_read_streams_start_index, generated_column_infos, log, pipeline); + NamesAndTypes source_columns; + source_columns.reserve(table_scan.getColumnSize()); + const auto table_scan_output_header = pipeline.firstStream()->getHeader(); + for (const auto & col : table_scan_output_header) + source_columns.emplace_back(col.name, col.type); + analyzer = std::make_unique(std::move(source_columns), context); + /// If there is no local stream, there is no need to execute cast and push down filter, return directly. + /// But we should make sure that the analyzer is initialized before return. + if (remote_read_streams_start_index == 0) + return; /// handle timezone/duration cast for local and remote table scan. 
executeCastAfterTableScan(remote_read_streams_start_index, pipeline); recordProfileStreams(pipeline, table_scan.getTableScanExecutorID()); @@ -479,9 +497,7 @@ void DAGStorageInterpreter::prepare() assert(storages_with_structure_lock.find(logical_table_id) != storages_with_structure_lock.end()); storage_for_logical_table = storages_with_structure_lock[logical_table_id].storage; - std::tie(required_columns, source_columns, is_need_add_cast_column) = getColumnsForTableScan(); - - analyzer = std::make_unique(std::move(source_columns), context); + std::tie(required_columns, is_need_add_cast_column) = getColumnsForTableScan(); } void DAGStorageInterpreter::executeCastAfterTableScan( @@ -1223,12 +1239,10 @@ std::unordered_map DAG return storages_with_lock; } -std::tuple> DAGStorageInterpreter::getColumnsForTableScan() +std::tuple> DAGStorageInterpreter::getColumnsForTableScan() { Names required_columns_tmp; required_columns_tmp.reserve(table_scan.getColumnSize()); - NamesAndTypes source_columns_tmp; - source_columns_tmp.reserve(table_scan.getColumnSize()); std::vector need_cast_column; need_cast_column.reserve(table_scan.getColumnSize()); String handle_column_name = MutableSupport::tidb_pk_column_name; @@ -1246,7 +1260,6 @@ std::tuple> DAGStorageIn const auto & data_type = getDataTypeByColumnInfoForComputingLayer(ci); const auto & col_name = GeneratedColumnPlaceholderBlockInputStream::getColumnName(i); generated_column_infos.push_back(std::make_tuple(i, col_name, data_type)); - source_columns_tmp.emplace_back(NameAndTypePair{col_name, data_type}); continue; } // Column ID -1 return the handle column @@ -1257,16 +1270,6 @@ std::tuple> DAGStorageIn name = MutableSupport::extra_table_id_column_name; else name = storage_for_logical_table->getTableInfo().getColumnName(cid); - if (cid == ExtraTableIDColumnID) - { - NameAndTypePair extra_table_id_column_pair = {name, MutableSupport::extra_table_id_column_type}; - source_columns_tmp.emplace_back(std::move(extra_table_id_column_pair)); - } - else - { - auto pair = storage_for_logical_table->getColumns().getPhysical(name); - source_columns_tmp.emplace_back(std::move(pair)); - } required_columns_tmp.emplace_back(std::move(name)); } @@ -1277,6 +1280,12 @@ std::tuple> DAGStorageIn } for (const auto & col : table_scan.getColumns()) { + if (col.hasGeneratedColumnFlag()) + { + need_cast_column.push_back(ExtraCastAfterTSMode::None); + continue; + } + if (col_id_set.contains(col.id)) { need_cast_column.push_back(ExtraCastAfterTSMode::None); @@ -1292,7 +1301,7 @@ std::tuple> DAGStorageIn } } - return {required_columns_tmp, source_columns_tmp, need_cast_column}; + return {required_columns_tmp, need_cast_column}; } // Build remote requests from `region_retry_from_local_region` and `table_regions_info.remote_regions` diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.h b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.h index 74ef84cb7a5..382594bf753 100644 --- a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.h +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.h @@ -98,7 +98,7 @@ class DAGStorageInterpreter std::unordered_map getAndLockStorages(Int64 query_schema_version); - std::tuple> getColumnsForTableScan(); + std::tuple> getColumnsForTableScan(); std::vector buildRemoteRequests(const DM::ScanContextPtr & scan_context); @@ -164,7 +164,6 @@ class DAGStorageInterpreter std::unordered_map storages_with_structure_lock; ManageableStoragePtr storage_for_logical_table; Names required_columns; - NamesAndTypes source_columns; // For generated column, 
just need a placeholder, and TiDB will fill this column. std::vector> generated_column_infos; diff --git a/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp b/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp index bef99e57b55..878e0930f92 100644 --- a/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp +++ b/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp @@ -1087,7 +1087,7 @@ BlockInputStreams DeltaMergeStore::read(const Context & db_context, { stream = std::make_shared( read_task_pool, - columns_to_read, + filter && filter->extra_cast ? *filter->columns_after_cast : columns_to_read, extra_table_id_index, physical_table_id, log_tracing_id); diff --git a/dbms/src/Storages/DeltaMerge/Filter/PushDownFilter.h b/dbms/src/Storages/DeltaMerge/Filter/PushDownFilter.h index 0d7e21e10cd..fd910a9a6ba 100644 --- a/dbms/src/Storages/DeltaMerge/Filter/PushDownFilter.h +++ b/dbms/src/Storages/DeltaMerge/Filter/PushDownFilter.h @@ -31,19 +31,18 @@ class PushDownFilter : public std::enable_shared_from_this const ExpressionActionsPtr & beofre_where_, const ColumnDefines & filter_columns_, const String filter_column_name_, - const ExpressionActionsPtr & extra_cast_) + const ExpressionActionsPtr & extra_cast_, + const ColumnDefinesPtr & columns_after_cast_) : rs_operator(rs_operator_) , before_where(beofre_where_) , filter_column_name(std::move(filter_column_name_)) , filter_columns(std::move(filter_columns_)) , extra_cast(extra_cast_) + , columns_after_cast(columns_after_cast_) {} explicit PushDownFilter(const RSOperatorPtr & rs_operator_) : rs_operator(rs_operator_) - , before_where(nullptr) - , filter_columns({}) - , extra_cast(nullptr) {} // Rough set operator @@ -56,6 +55,8 @@ class PushDownFilter : public std::enable_shared_from_this ColumnDefines filter_columns; // The expression actions used to cast the timestamp/datetime column ExpressionActionsPtr extra_cast; + // If the extra_cast is not null, the types of the columns may be changed + ColumnDefinesPtr columns_after_cast; }; } // namespace DB::DM diff --git a/dbms/src/Storages/StorageDeltaMerge.cpp b/dbms/src/Storages/StorageDeltaMerge.cpp index efd8dc1d6bf..8dbdb91f596 100644 --- a/dbms/src/Storages/StorageDeltaMerge.cpp +++ b/dbms/src/Storages/StorageDeltaMerge.cpp @@ -839,7 +839,22 @@ DM::PushDownFilterPtr StorageDeltaMerge::buildPushDownFilter(const RSOperatorPtr auto [before_where, filter_column_name, _] = ::DB::buildPushDownFilter(pushed_down_filters, *analyzer); LOG_DEBUG(tracing_logger, "Push down filter: {}", before_where->dumpActions()); - return std::make_shared(rs_operator, before_where, filter_columns, filter_column_name, extra_cast); + auto columns_after_cast = std::make_shared(); + if (extra_cast != nullptr) + { + columns_after_cast->reserve(columns_to_read.size()); + const auto & source_columns = analyzer->getCurrentInputColumns(); + for (size_t i = 0; i < table_scan_column_info.size(); ++i) + { + if (table_scan_column_info[i].hasGeneratedColumnFlag()) + continue; + auto it = columns_to_read_map.at(table_scan_column_info[i].id); + RUNTIME_CHECK(it.name == source_columns[i].name); + columns_after_cast->push_back(it); + columns_after_cast->back().type = source_columns[i].type; + } + } + return std::make_shared(rs_operator, before_where, filter_columns, filter_column_name, extra_cast, columns_after_cast); } LOG_DEBUG(tracing_logger, "Push down filter is empty"); return std::make_shared(rs_operator); diff --git a/tests/fullstack-test/expr/duration_filter_late_materialization.test 
b/tests/fullstack-test/expr/duration_filter_late_materialization.test new file mode 100644 index 00000000000..63b496c3f5d --- /dev/null +++ b/tests/fullstack-test/expr/duration_filter_late_materialization.test @@ -0,0 +1,47 @@ +# Copyright 2023 PingCAP, Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +mysql> drop table if exists test.t; +mysql> create table if not exists test.t(a time(4), i int); + +# insert more than 8192 rows to make sure filter conditions can be pushed down. +mysql> insert into test.t values('-700:10:10.123456', 1), ('700:11:11.123500', 2), ('600:11:11.123500', 3); +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; +mysql> insert into test.t select * from test.t; + +mysql> alter table test.t set tiflash replica 1; + +func> wait_table test t + +mysql> select * from test.t where a = '500:11:11.123500'; +# success, but the result is empty +mysql> select hour(a), i from test.t where a = '500:11:11.123500'; +mysql> select minute(a), i from test.t where a = '500:11:11.123500'; +mysql> select second(a), i from test.t where a = '500:11:11.123500'; +mysql> select a, i from test.t where hour(a) = 500; +mysql> select a, i from test.t where minute(a) = 13; +mysql> select a, i from test.t where second(a) = 14; + +mysql> drop table test.t; \ No newline at end of file From d27fcc4ebd4bb259132c7dfeafdfa485f7e44bf8 Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Mon, 15 May 2023 13:42:08 +0800 Subject: [PATCH 4/7] refine some code in StorageDisaggregated Signed-off-by: Lloyd-Pottiger --- dbms/src/Storages/StorageDisaggregated.cpp | 50 +++++++++++++++++++ .../Storages/StorageDisaggregatedRemote.cpp | 50 +------------------ 2 files changed, 52 insertions(+), 48 deletions(-) diff --git a/dbms/src/Storages/StorageDisaggregated.cpp b/dbms/src/Storages/StorageDisaggregated.cpp index f40776286d9..bc0e732477a 100644 --- a/dbms/src/Storages/StorageDisaggregated.cpp +++ b/dbms/src/Storages/StorageDisaggregated.cpp @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include #include #include #include @@ -305,4 +306,53 @@ void StorageDisaggregated::filterConditions(DAGExpressionAnalyzer & analyzer, DA pipeline.transform([&profile_streams](auto & stream) { profile_streams.push_back(stream); }); } } + +void StorageDisaggregated::extraCast(DAGExpressionAnalyzer & analyzer, DAGPipeline & pipeline) +{ + // If the column is not in the columns of pushed down filter, append a cast to the column. 
+ std::vector need_cast_column; + need_cast_column.reserve(table_scan.getColumnSize()); + std::unordered_set col_id_set; + for (const auto & expr : table_scan.getPushedDownFilters()) + { + getColumnIDsFromExpr(expr, table_scan.getColumns(), col_id_set); + } + bool has_need_cast_column = false; + for (const auto & col : table_scan.getColumns()) + { + if (col_id_set.contains(col.id)) + { + need_cast_column.push_back(ExtraCastAfterTSMode::None); + } + else + { + if (col.id != -1 && col.tp == TiDB::TypeTimestamp) + { + need_cast_column.push_back(ExtraCastAfterTSMode::AppendTimeZoneCast); + has_need_cast_column = true; + } + else if (col.id != -1 && col.tp == TiDB::TypeTime) + { + need_cast_column.push_back(ExtraCastAfterTSMode::AppendDurationCast); + has_need_cast_column = true; + } + else + { + need_cast_column.push_back(ExtraCastAfterTSMode::None); + } + } + } + ExpressionActionsChain chain; + if (has_need_cast_column && analyzer.appendExtraCastsAfterTS(chain, need_cast_column, table_scan)) + { + ExpressionActionsPtr extra_cast = chain.getLastActions(); + chain.finalize(); + chain.clear(); + for (auto & stream : pipeline.streams) + { + stream = std::make_shared(stream, extra_cast, log->identifier()); + stream->setExtraInfo("cast after local tableScan"); + } + } +} } // namespace DB diff --git a/dbms/src/Storages/StorageDisaggregatedRemote.cpp b/dbms/src/Storages/StorageDisaggregatedRemote.cpp index a06a5ba34d2..17a481e6ba7 100644 --- a/dbms/src/Storages/StorageDisaggregatedRemote.cpp +++ b/dbms/src/Storages/StorageDisaggregatedRemote.cpp @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -156,56 +155,11 @@ BlockInputStreams StorageDisaggregated::readThroughS3( { source_columns.emplace_back(col.name, col.type); } - analyzer = std::make_unique(std::move(source_columns), context); // Handle duration type column - // If the column is not in the columns of pushed down filter, append a cast to the column. 
- std::vector need_cast_column; - need_cast_column.reserve(table_scan.getColumnSize()); - std::unordered_set col_id_set; - for (const auto & expr : table_scan.getPushedDownFilters()) - { - getColumnIDsFromExpr(expr, table_scan.getColumns(), col_id_set); - } - bool has_need_cast_column = false; - for (const auto & col : table_scan.getColumns()) - { - if (col_id_set.contains(col.id)) - { - need_cast_column.push_back(ExtraCastAfterTSMode::None); - } - else - { - if (col.id != -1 && col.tp == TiDB::TypeTimestamp) - { - need_cast_column.push_back(ExtraCastAfterTSMode::AppendTimeZoneCast); - has_need_cast_column = true; - } - else if (col.id != -1 && col.tp == TiDB::TypeTime) - { - need_cast_column.push_back(ExtraCastAfterTSMode::AppendDurationCast); - has_need_cast_column = true; - } - else - { - need_cast_column.push_back(ExtraCastAfterTSMode::None); - } - } - } - ExpressionActionsChain chain; - if (has_need_cast_column && analyzer->appendExtraCastsAfterTS(chain, need_cast_column, table_scan)) - { - ExpressionActionsPtr extra_cast = chain.getLastActions(); - chain.finalize(); - chain.clear(); - for (auto & stream : pipeline.streams) - { - stream = std::make_shared(stream, extra_cast, log->identifier()); - stream->setExtraInfo("cast after local tableScan"); - } - } - + extraCast(*analyzer, pipeline); + // Handle filter filterConditions(*analyzer, pipeline); return pipeline.streams; } From 2dcf4471ccad19d980892739bf53777e63a97fbb Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Mon, 15 May 2023 17:13:37 +0800 Subject: [PATCH 5/7] address comments Signed-off-by: Lloyd-Pottiger --- dbms/src/Storages/StorageDeltaMerge.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dbms/src/Storages/StorageDeltaMerge.cpp b/dbms/src/Storages/StorageDeltaMerge.cpp index 8dbdb91f596..b4cf77e0aa2 100644 --- a/dbms/src/Storages/StorageDeltaMerge.cpp +++ b/dbms/src/Storages/StorageDeltaMerge.cpp @@ -843,15 +843,15 @@ DM::PushDownFilterPtr StorageDeltaMerge::buildPushDownFilter(const RSOperatorPtr if (extra_cast != nullptr) { columns_after_cast->reserve(columns_to_read.size()); - const auto & source_columns = analyzer->getCurrentInputColumns(); + const auto & current_names_and_types = analyzer->getCurrentInputColumns(); for (size_t i = 0; i < table_scan_column_info.size(); ++i) { if (table_scan_column_info[i].hasGeneratedColumnFlag()) continue; - auto it = columns_to_read_map.at(table_scan_column_info[i].id); - RUNTIME_CHECK(it.name == source_columns[i].name); - columns_after_cast->push_back(it); - columns_after_cast->back().type = source_columns[i].type; + auto col = columns_to_read_map.at(table_scan_column_info[i].id); + RUNTIME_CHECK_MSG(col.name == current_names_and_types[i].name, "Column name mismatch, expect: {}, actual: {}", col.name, current_names_and_types[i].name); + columns_after_cast->push_back(col); + columns_after_cast->back().type = current_names_and_types[i].type; } } return std::make_shared(rs_operator, before_where, filter_columns, filter_column_name, extra_cast, columns_after_cast); From 68a9ffabbc4a3c75b3426134c3986b486cc9398a Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Tue, 16 May 2023 10:06:53 +0800 Subject: [PATCH 6/7] fix recordProfileStreams Signed-off-by: Lloyd-Pottiger --- dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp index 21735024745..ff0cf31391d 100644 --- 
a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp @@ -452,7 +452,10 @@ void DAGStorageInterpreter::executeImpl(DAGPipeline & pipeline) /// If there is no local stream, there is no need to execute cast and push down filter, return directly. /// But we should make sure that the analyzer is initialized before return. if (remote_read_streams_start_index == 0) + { + recordProfileStreams(pipeline, table_scan.getTableScanExecutorID()); return; + } /// handle timezone/duration cast for local and remote table scan. executeCastAfterTableScan(remote_read_streams_start_index, pipeline); recordProfileStreams(pipeline, table_scan.getTableScanExecutorID()); From 50fe23eeea5fa33254e6f4997597981604b709f0 Mon Sep 17 00:00:00 2001 From: Lloyd-Pottiger Date: Tue, 16 May 2023 16:01:41 +0800 Subject: [PATCH 7/7] fix recordProfileStreams of PushedDownFilter Signed-off-by: Lloyd-Pottiger --- dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp index ff0cf31391d..052af71b8bc 100644 --- a/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp +++ b/dbms/src/Flash/Coprocessor/DAGStorageInterpreter.cpp @@ -454,6 +454,8 @@ void DAGStorageInterpreter::executeImpl(DAGPipeline & pipeline) if (remote_read_streams_start_index == 0) { recordProfileStreams(pipeline, table_scan.getTableScanExecutorID()); + if (filter_conditions.hasValue()) + recordProfileStreams(pipeline, filter_conditions.executor_id); return; } /// handle timezone/duration cast for local and remote table scan.
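
Taken together, PATCH 1/7, 2/7, 3/7, 6/7 and 7/7 settle on one ordering for the post-scan phase: add the generated-column placeholders first, build the DAGExpressionAnalyzer from the header the table scan actually outputs, and only then run the casts and pushed-down filter, with the early return for a purely remote read still recording the table-scan and filter profile streams. The sketch below illustrates that control flow only; `Pipeline`, `Analyzer`, `afterTableScan` and the step functions are simplified stand-ins invented for the example, not TiFlash's real classes or signatures.

```cpp
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

// Stand-in types for this sketch only; the real code uses DAGPipeline,
// DAGExpressionAnalyzer, BlockInputStreams, etc.
struct Pipeline
{
    std::vector<std::string> header; // column names the table scan outputs
};

struct Analyzer
{
    explicit Analyzer(std::vector<std::string> cols)
        : source_columns(std::move(cols))
    {}
    std::vector<std::string> source_columns;
};

void executeGeneratedColumnPlaceholder(Pipeline & p) { p.header.push_back("generated_placeholder"); }
void executeCastAfterTableScan(const Analyzer &, std::size_t /*remote_start*/, Pipeline &) {}
void executeFilterConditions(const Analyzer &, Pipeline &) {}
void recordProfileStreams(Pipeline &, const std::string & /*executor_id*/) {}

// Mirrors the ordering the series converges on in DAGStorageInterpreter::executeImpl:
// placeholders first, analyzer always built from the scan output header, and the
// "no local streams" early return still recording profile streams.
void afterTableScan(Pipeline & pipeline, std::size_t remote_read_streams_start_index, bool has_filter)
{
    // 1. Generated columns get a placeholder before anything else, so the header
    //    already contains them when the analyzer is built.
    executeGeneratedColumnPlaceholder(pipeline);

    // 2. Build the analyzer from whatever the table scan actually outputs,
    //    before any early return, so later phases can still rely on it.
    const auto analyzer = std::make_unique<Analyzer>(pipeline.header);

    // 3. Purely remote read: nothing local to cast or filter here, but the profile
    //    streams of the scan and of the pushed-down filter must still be recorded.
    if (remote_read_streams_start_index == 0)
    {
        recordProfileStreams(pipeline, "table_scan");
        if (has_filter)
            recordProfileStreams(pipeline, "selection");
        return;
    }

    // 4. Otherwise run the casts and remaining filter conditions, recording the
    //    same profile streams along the way.
    executeCastAfterTableScan(*analyzer, remote_read_streams_start_index, pipeline);
    recordProfileStreams(pipeline, "table_scan");
    if (has_filter)
    {
        executeFilterConditions(*analyzer, pipeline);
        recordProfileStreams(pipeline, "selection");
    }
}

int main()
{
    Pipeline remote_only{{"col102", "col5"}};
    afterTableScan(remote_only, /*remote_read_streams_start_index=*/0, /*has_filter=*/true);
    return 0;
}
```

Per the new comments in PATCH 3/7, the analyzer still has to exist after the early return, which is why it is built unconditionally from the scan output header rather than inside getColumnsForTableScan as before.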
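
PATCH 3/7 and 5/7 also thread a `columns_after_cast` schema into `DM::PushDownFilter`: when `extra_cast` rewrites timestamp/duration columns during the segment read, the stream created in `DeltaMergeStore::read` has to be declared with the post-cast column types rather than the original `columns_to_read`. Below is a minimal, self-contained sketch of that bookkeeping; `ColumnDefine`, `ScanColumnInfo`, `buildColumnsAfterCast` and the Int64-to-MyDuration types are assumptions made for the illustration, not TiFlash's actual definitions.

```cpp
#include <cassert>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Invented, minimal stand-ins; the real ColumnDefine/ColumnDefines carry more fields.
struct ColumnDefine
{
    long id;
    std::string name;
    std::string type; // assumption: e.g. "Int64" on disk, "MyDuration(4)" after the duration cast
};
using ColumnDefines = std::vector<ColumnDefine>;
using ColumnDefinesPtr = std::shared_ptr<ColumnDefines>;

struct ScanColumnInfo
{
    long id;
    bool generated; // generated columns only get a placeholder and are never cast
};

// Sketch of the bookkeeping in StorageDeltaMerge::buildPushDownFilter when extra_cast
// is present: rebuild the read schema with the post-cast types, skipping generated
// columns. `post_cast` is assumed to hold (name, type) pairs for the non-generated
// scan columns, in scan order.
ColumnDefinesPtr buildColumnsAfterCast(const std::vector<ScanColumnInfo> & scan_columns,
                                       const ColumnDefines & columns_to_read,
                                       const std::vector<std::pair<std::string, std::string>> & post_cast)
{
    auto columns_after_cast = std::make_shared<ColumnDefines>();
    columns_after_cast->reserve(columns_to_read.size());
    std::size_t j = 0; // walks columns_to_read / post_cast in lockstep
    for (const auto & info : scan_columns)
    {
        if (info.generated)
            continue;
        ColumnDefine col = columns_to_read[j];
        assert(col.name == post_cast[j].first); // same column, possibly with a new type
        col.type = post_cast[j].second;
        columns_after_cast->push_back(col);
        ++j;
    }
    return columns_after_cast;
}

int main()
{
    const std::vector<ScanColumnInfo> scan = {{102, false}, {1, true}, {5, false}};
    const ColumnDefines to_read = {{102, "col102", "Float32"}, {5, "col5", "Int64"}};
    const std::vector<std::pair<std::string, std::string>> post_cast =
        {{"col102", "Float32"}, {"col5", "MyDuration(4)"}};
    const auto after_cast = buildColumnsAfterCast(scan, to_read, post_cast);
    assert((*after_cast)[1].type == "MyDuration(4)"); // the reader must be declared with this type
    return 0;
}
```

On the read side the choice then reduces to `filter && filter->extra_cast ? *filter->columns_after_cast : columns_to_read`, which is exactly the one-line change to `DeltaMergeStore::read` in PATCH 3/7.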