
Commit 939faba

[Fix](Variant) sparse columns should not be added in init segment iterator
eldenmoon committed Sep 3, 2024
1 parent a1ea5c4 commit 939faba
Showing 2 changed files with 6 additions and 5 deletions.
9 changes: 5 additions & 4 deletions be/src/olap/rowset/segment_v2/segment.cpp
@@ -553,7 +553,7 @@ Status Segment::_create_column_readers(const SegmentFooterPB& footer) {
             vectorized::PathInData path;
             path.from_protobuf(spase_column_pb.column_path_info());
             // Read from root column, so reader is nullptr
-            _sparse_column_tree[column.unique_id()].add(
+            _sparse_column_tree[unique_id].add(
                     path.copy_pop_front(),
                     SubcolumnReader {nullptr,
                                      vectorized::DataTypeFactory::instance().create_data_type(
@@ -617,9 +617,10 @@ Status Segment::new_column_iterator_with_path(const TabletColumn& tablet_column,
     const auto* node = tablet_column.has_path_info()
                                ? _sub_column_tree[unique_id].find_exact(relative_path)
                                : nullptr;
-    const auto* sparse_node = tablet_column.has_path_info()
-                                      ? _sparse_column_tree[unique_id].find_exact(relative_path)
-                                      : nullptr;
+    const auto* sparse_node =
+            tablet_column.has_path_info() && _sparse_column_tree.contains(unique_id)
+                    ? _sparse_column_tree[unique_id].find_exact(relative_path)
+                    : nullptr;
     // Currently only compaction and checksum need to read flat leaves
     // They both use tablet_schema_with_merged_max_schema_version as read schema
     auto type_to_read_flat_leaves = [](ReaderType type) {
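Why the contains() guard matters: operator[] on an associative container default-constructs and inserts a value for any key it does not find, so merely probing _sparse_column_tree while building a column iterator could add empty sparse-column entries as a side effect. Below is a minimal standalone sketch of that pitfall, assuming std::unordered_map-like semantics for the tree keyed by column unique id (the concrete type of _sparse_column_tree is not shown in this diff):

// Sketch only: SubcolumnTree is a stand-in for the real subcolumn tree type.
#include <cassert>
#include <unordered_map>

struct SubcolumnTree {
    int nodes = 0; // empty by default
};

int main() {
    std::unordered_map<int, SubcolumnTree> sparse_column_tree;

    // operator[] default-constructs and INSERTS a value for a missing key,
    // mutating the map as a side effect of what looks like a read.
    const auto* accidental = &sparse_column_tree[42];
    (void)accidental;
    assert(sparse_column_tree.size() == 1); // an empty entry was added

    sparse_column_tree.clear();

    // Guarding with contains() (C++20) keeps the lookup side-effect free,
    // mirroring the pattern in the fixed sparse_node initialization above.
    const SubcolumnTree* node = sparse_column_tree.contains(7)
                                        ? &sparse_column_tree[7]
                                        : nullptr;
    assert(node == nullptr);
    assert(sparse_column_tree.empty()); // no spurious entry created
    return 0;
}

With the contains() check in place, probing for a sparse node during iterator creation can no longer insert empty entries into the sparse column tree.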
2 changes: 1 addition & 1 deletion regression-test/suites/variant_p0/load.groovy
@@ -290,7 +290,7 @@ suite("regression_test_variant", "p0"){
sql """insert into ${table_name} values (5, '{"i" : 1}'), (1, '{"a" : 1}')"""
sql """insert into ${table_name} values (6, '{"j" : 1}'), (1, '{"a" : 1}')"""
sql """insert into ${table_name} values (6, '{"k" : 1}'), (1, '{"a" : 1}')"""
sql "select * from ${table_name}"
sql "select /*+SET_VAR(batch_size=4064,broker_load_batch_size=16352,disable_streaming_preaggregations=false,enable_distinct_streaming_aggregation=true,parallel_fragment_exec_instance_num=1,parallel_pipeline_task_num=4,profile_level=1,enable_pipeline_engine=true,enable_parallel_scan=true,parallel_scan_max_scanners_count=16,parallel_scan_min_rows_per_scanner=128,enable_fold_constant_by_be=true,enable_rewrite_element_at_to_slot=true,runtime_filter_type=2,enable_parallel_result_sink=false,sort_phase_num=0,enable_nereids_planner=true,rewrite_or_to_in_predicate_threshold=2,enable_function_pushdown=true,enable_common_expr_pushdown=true,enable_local_exchange=true,partitioned_hash_join_rows_threshold=1048576,partitioned_hash_agg_rows_threshold=8,partition_pruning_expand_threshold=10,enable_share_hash_table_for_broadcast_join=true,enable_two_phase_read_opt=true,enable_common_expr_pushdown_for_inverted_index=false,enable_delete_sub_predicate_v2=true,min_revocable_mem=33554432,fetch_remote_schema_timeout_seconds=120,max_fetch_remote_schema_tablet_count=512,enable_join_spill=false,enable_sort_spill=false,enable_agg_spill=false,enable_force_spill=false,data_queue_max_blocks=1,spill_streaming_agg_mem_limit=268435456,external_agg_partition_bits=5) */ * from ${table_name}"
qt_sql_36_1 "select cast(v['a'] as int), cast(v['b'] as int), cast(v['c'] as int) from ${table_name} order by k limit 10"
sql "DELETE FROM ${table_name} WHERE k=1"
sql "select * from ${table_name}"
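The only change here is the injected hint: a /*+SET_VAR(key=value,...)*/ comment after select pins each listed session variable for that single statement, so the test exercises the query under a fixed execution configuration. A minimal sketch of the same pattern in the suite DSL, with an illustrative suite and table name and a small subset of the variables used above:

// Sketch only: suite and table names are illustrative; the SET_VAR hint
// scopes the listed session variables to this one statement.
suite("variant_set_var_sketch", "p0") {
    def table_name = "variant_sketch_tbl"
    sql "select /*+SET_VAR(parallel_pipeline_task_num=4,enable_common_expr_pushdown=true)*/ * from ${table_name}"
}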
