Skip to content

Commit

Permalink
[fix](serde)fix the bug in DataTypeNullableSerDe.deserialize_column_f…
Browse files Browse the repository at this point in the history
…rom_fixed_json (#38245)

## Proposed changes
fix a bug in DataTypeNullableSerDe.deserialize_column_from_fixed_json.

The expected behavior of the `deserialize_column_from_fixed_json`
function is to insert n values into the column (i.e. append n entries).

However, the `DataTypeNullableSerDe` implementation of this function
`resize`s the null_map column to n instead of appending n values to it,
so existing entries are not preserved as an append would require.
Because this function is only used by `_fill_partition_columns` in the
`parquet/orc reader` and was not called repeatedly within a single
`get_next_block`, the bug was previously masked.
See earlier related PR: #37377
  • Loading branch information
hubgeter authored and dataroaring committed Aug 16, 2024
1 parent fa18755 commit 1ce5b99
Show file tree
Hide file tree
Showing 11 changed files with 210 additions and 5 deletions.
12 changes: 7 additions & 5 deletions be/src/vec/data_types/serde/data_type_nullable_serde.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -135,12 +135,14 @@ Status DataTypeNullableSerDe::deserialize_column_from_fixed_json(
if (!st.ok()) {
return st;
}
auto& null_map = col.get_null_map_data();
auto& nested_column = col.get_nested_column();

null_map.resize_fill(
rows, null_map.back()); // data_type_nullable::insert_column_last_value_multiple_times()
if (rows - 1 != 0) {
auto& null_map = col.get_null_map_data();
auto& nested_column = col.get_nested_column();

uint8_t val = null_map.back();
size_t new_sz = null_map.size() + rows - 1;
null_map.resize_fill(new_sz,
val); // data_type_nullable::insert_column_last_value_multiple_times()
nested_serde->insert_column_last_value_multiple_times(nested_column, rows - 1);
}
*num_deserialized = rows;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
-- Preinstalled-data DDL for the fill-partition regression test:
-- two external partitioned tables whose data files span multiple
-- ORC stripes / Parquet row groups, so partition columns are filled
-- more than once per file scan.
use default;


-- External ORC table; data file contains multiple stripes.
CREATE TABLE orc_partition_multi_stripe (
    col1 STRING,
    col2 INT,
    col3 DOUBLE
) PARTITIONED BY (
    partition_col1 STRING,
    partition_col2 INT
)
STORED AS ORC
LOCATION '/user/doris/preinstalled_data/orc_table/orc_partition_multi_stripe';
-- Register the pre-existing partition directories with the metastore.
msck repair table orc_partition_multi_stripe;

-- External Parquet table; data file contains multiple row groups.
CREATE TABLE parquet_partition_multi_row_group (
    col1 STRING,
    col2 INT,
    col3 DOUBLE
) PARTITIONED BY (
    partition_col1 STRING,
    partition_col2 INT
)
STORED AS PARQUET
LOCATION '/user/doris/preinstalled_data/parquet_table/parquet_partition_multi_row_group';
-- Register the pre-existing partition directories with the metastore.
msck repair table parquet_partition_multi_row_group;
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !parquet_1 --
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1

-- !parquet_2 --
1792

-- !parquet_3 --
1792

-- !parquet_4 --
1792

-- !parquet_5 --
1792

-- !parquet_6 --
1792

-- !parquet_7 --
word 1792

-- !parquet_8 --
hello 1792

-- !parquet_9 --
1 1792

-- !parquet_10 --
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1

-- !parquet_11 --
1792

-- !parquet_12 --
1792

-- !parquet_13 --
1792

-- !parquet_14 --
0

-- !orc_1 --
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1

-- !orc_2 --
7680

-- !orc_3 --
7680

-- !orc_4 --
7680

-- !orc_5 --
7680

-- !orc_6 --
7680

-- !orc_7 --
word 7680

-- !orc_8 --
hello 7680

-- !orc_9 --
1 7680

-- !orc_10 --
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1
word 2 2.3 hello 1

-- !orc_11 --
7680

-- !orc_12 --
7680

-- !orc_13 --
7680

-- !orc_14 --
0

Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.


// Regression test for DataTypeNullableSerDe::deserialize_column_from_fixed_json:
// scans Hive tables whose partition columns are filled by _fill_partition_columns
// across multiple ORC stripes / Parquet row groups, exercising the repeated
// fill path that previously corrupted the null_map.
suite("test_hive_opt_fill_partition", "p0,external,hive,external_docker,external_docker_hive") {
    String enabled = context.config.otherConfigs.get("enableHiveTest")
    if (enabled != null && enabled.equalsIgnoreCase("true")) {
        String hivePrefix = "hive3";
        setHivePrefix(hivePrefix)
        String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
        String hmsPort = context.config.otherConfigs.get(hivePrefix + "HmsPort")
        String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort")

        String catalog_name = "test_hive_opt_fill_partition"
        sql """drop catalog if exists ${catalog_name};"""
        sql """
            create catalog if not exists ${catalog_name} properties (
                'type'='hms',
                'hadoop.username' = 'hadoop',
                'fs.defaultFS' = 'hdfs://${externalEnvIp}:${hdfs_port}',
                'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hmsPort}'
            );
        """

        sql """ switch ${catalog_name} """
        sql """ use `default` """

        // Count each data and partition column individually so a wrong
        // null_map on any one column changes the result.
        qt_parquet_1 """ select * from parquet_partition_multi_row_group limit 5; """
        qt_parquet_2 """ select count(col1) from parquet_partition_multi_row_group ; """
        qt_parquet_3 """ select count(col2) from parquet_partition_multi_row_group ; """
        qt_parquet_4 """ select count(col3) from parquet_partition_multi_row_group ; """
        qt_parquet_5 """ select count(partition_col1) from parquet_partition_multi_row_group ; """
        // Was count(partition_col1) twice by copy-paste; partition_col2 was never covered.
        // Expected output is unchanged (partition_col2 is non-null in every row).
        qt_parquet_6 """ select count(partition_col2) from parquet_partition_multi_row_group ; """
        qt_parquet_7 """ select col1,count(*) from parquet_partition_multi_row_group group by col1; """
        qt_parquet_8 """ select partition_col1,count(*) from parquet_partition_multi_row_group group by partition_col1; """
        qt_parquet_9 """ select partition_col2,count(*) from parquet_partition_multi_row_group group by partition_col2; """
        qt_parquet_10 """ select * from parquet_partition_multi_row_group where col1 = 'word' limit 5; """
        qt_parquet_11 """ select count(*) from parquet_partition_multi_row_group where col2 != 100; """
        qt_parquet_12 """ select count(*) from parquet_partition_multi_row_group where partition_col1 = 'hello' limit 5; """
        qt_parquet_13 """ select count(*) from parquet_partition_multi_row_group where partition_col2 = 1 limit 5; """
        qt_parquet_14 """ select count(*) from parquet_partition_multi_row_group where partition_col2 != 1 ; """


        qt_orc_1 """ select * from orc_partition_multi_stripe limit 5; """
        qt_orc_2 """ select count(col1) from orc_partition_multi_stripe ; """
        qt_orc_3 """ select count(col2) from orc_partition_multi_stripe ; """
        qt_orc_4 """ select count(col3) from orc_partition_multi_stripe ; """
        qt_orc_5 """ select count(partition_col1) from orc_partition_multi_stripe ; """
        // Same copy-paste fix as qt_parquet_6: cover partition_col2.
        qt_orc_6 """ select count(partition_col2) from orc_partition_multi_stripe ; """
        qt_orc_7 """ select col1,count(*) from orc_partition_multi_stripe group by col1; """
        qt_orc_8 """ select partition_col1,count(*) from orc_partition_multi_stripe group by partition_col1; """
        qt_orc_9 """ select partition_col2,count(*) from orc_partition_multi_stripe group by partition_col2; """
        qt_orc_10 """ select * from orc_partition_multi_stripe where col1 = 'word' limit 5; """
        qt_orc_11 """ select count(*) from orc_partition_multi_stripe where col2 != 100; """
        qt_orc_12 """ select count(*) from orc_partition_multi_stripe where partition_col1 = 'hello' limit 5; """
        qt_orc_13 """ select count(*) from orc_partition_multi_stripe where partition_col2 = 1 limit 5; """
        qt_orc_14 """ select count(*) from orc_partition_multi_stripe where partition_col2 != 1 ; """

    }
}

0 comments on commit 1ce5b99

Please sign in to comment.