Fix a server crash
When chunk_group_row_limit is bigger than 110000, there is a crash
caused by ReadStripeNextVector().
japinli committed Jan 31, 2024
1 parent faa1889 commit 4c50da5
Showing 4 changed files with 39 additions and 6 deletions.
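
For reference, the crash path described in the commit message can be exercised with the same setup the new regression test below uses; the following is just that test restated as a standalone sketch (the table name, the 11000 chunk_group_row_limit, and the 50001-row insert are all taken from it), not a separately verified reproducer:

CREATE TABLE t(a INT) USING columnar;
-- Set a non-default chunk_group_row_limit so the 50001-row insert spans
-- several chunk groups.
SELECT columnar.alter_columnar_table_set('t', chunk_group_row_limit => '11000');
INSERT INTO t SELECT a FROM generate_series(0, 50000) AS a;
-- Reading the table back is what hit ReadStripeNextVector() on an unpatched
-- build, per the commit message; with the fix this returns 50001.
SELECT count(*) FROM t;
DROP TABLE t;
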
6 changes: 3 additions & 3 deletions columnar/src/backend/columnar/columnar_reader.c
@@ -2162,8 +2162,8 @@ ReadStripeNextVector(StripeReadState *stripeReadState, Datum *columnValues,
 		if (*newVectorSize == 0)
 			continue;
 	}
-
-	stripeReadState->currentRow += stripeReadState->chunkGroupReadState->rowCount;
+	else
+		stripeReadState->currentRow += stripeReadState->chunkGroupReadState->rowCount;
 
 	return true;
 }
@@ -2260,4 +2260,4 @@ ReadChunkGroupNextVector(ChunkGroupReadState *chunkGroupReadState, Datum *column
 	}
 
 	return true;
-}
+}
@@ -63,7 +63,7 @@ SELECT columnar.alter_columnar_table_set('test', compression => 'lz4');
 INSERT INTO test VALUES(1);
 VACUUM VERBOSE test;
 INFO: statistics for "test":
-storage id: 10000000141
+storage id: 10000000142
 total file size: 24576, total data size: 6
 compression rate: 0.83x
 total row count: 1, stripe count: 1, average rows per stripe: 1
@@ -72,7 +72,7 @@ chunk count: 1, containing data for dropped columns: 0, lz4 compressed: 1
 ALTER TABLE test ALTER COLUMN i TYPE int8;
 VACUUM VERBOSE test;
 INFO: statistics for "test":
-storage id: 10000000142
+storage id: 10000000143
 total file size: 24576, total data size: 10
 compression rate: 0.90x
 total row count: 1, stripe count: 1, average rows per stripe: 1
18 changes: 18 additions & 0 deletions columnar/src/test/regress/expected/columnar_query.out
@@ -310,3 +310,21 @@ SELECT * FROM t WHERE a >= 90;
 (11 rows)
 
 DROP TABLE t;
+--
+-- [columnar] Test chunk_group_row_limit
+--
+CREATE TABLE t(a INT) USING columnar;
+SELECT columnar.alter_columnar_table_set('t', chunk_group_row_limit => '11000');
+ alter_columnar_table_set
+--------------------------
+ 
+(1 row)
+
+INSERT INTO t SELECT a FROM generate_series(0,50000) AS a;
+SELECT count(*) FROM t;
+ count
+-------
+ 50001
+(1 row)
+
+DROP TABLE t;
17 changes: 16 additions & 1 deletion columnar/src/test/regress/sql/columnar_query.sql
@@ -159,4 +159,19 @@ SELECT * FROM t WHERE a >= 90;
 
 SELECT * FROM t WHERE a >= 90;
 
-DROP TABLE t;
+DROP TABLE t;
+
+
+--
+-- [columnar] Test chunk_group_row_limit
+--
+
+CREATE TABLE t(a INT) USING columnar;
+
+SELECT columnar.alter_columnar_table_set('t', chunk_group_row_limit => '11000');
+
+INSERT INTO t SELECT a FROM generate_series(0,50000) AS a;
+
+SELECT count(*) FROM t;
+
+DROP TABLE t;