
Commit e0e97cc

This is an automated cherry-pick of pingcap#8702
Signed-off-by: ti-chi-bot <[email protected]>
JinheLin authored and ti-chi-bot committed Jan 18, 2024
1 parent 8e4722f commit e0e97cc
Showing 2 changed files with 39 additions and 1 deletion.

dbms/src/Interpreters/Settings.h (2 changes: 1 addition & 1 deletion)

@@ -219,7 +219,7 @@ struct Settings
 M(SettingUInt64, dt_max_sharing_column_bytes_for_all, 2048 * Constant::MB, "Memory limitation for data sharing of all requests. 0 means disable data sharing") \
 M(SettingUInt64, dt_max_sharing_column_count, 5, "ColumnPtr object limitation for data sharing of each DMFileReader::Stream. 0 means disable data sharing") \
 M(SettingBool, dt_enable_bitmap_filter, true, "Use bitmap filter to read data or not") \
-M(SettingDouble, dt_read_thread_count_scale, 1.0, "Number of read thread = number of logical cpu cores * dt_read_thread_count_scale. Only has meaning at server startup.") \
+M(SettingDouble, dt_read_thread_count_scale, 2.0, "Number of read thread = number of logical cpu cores * dt_read_thread_count_scale. Only has meaning at server startup.") \
 M(SettingDouble, dt_filecache_max_downloading_count_scale, 1.0, "Max downloading task count of FileCache = io thread count * dt_filecache_max_downloading_count_scale.") \
 M(SettingUInt64, dt_filecache_min_age_seconds, 1800, "Files of the same priority can only be evicted from files that were not accessed within `dt_filecache_min_age_seconds` seconds.") \
 M(SettingUInt64, dt_small_file_size_threshold, 128 * 1024, "When S3 is enabled, file size less than dt_small_file_size_threshold will be merged before uploading to S3") \
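
The only change in this file doubles the default of `dt_read_thread_count_scale` from 1.0 to 2.0. Per the description string, the pool is sized as: number of read threads = number of logical CPU cores * `dt_read_thread_count_scale`, evaluated once at server startup. Below is a minimal standalone sketch of that arithmetic (a hypothetical helper, not TiFlash's actual startup code; the ceil-rounding and the floor of one thread are assumptions of the sketch):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <thread>

// Hypothetical helper mirroring the formula in the setting's description:
//   read threads = logical cpu cores * dt_read_thread_count_scale
// Rounding up and clamping to at least 1 thread are assumptions of this sketch.
std::size_t computeReadThreadCount(double dt_read_thread_count_scale)
{
    const double logical_cores = std::thread::hardware_concurrency();
    const auto count = static_cast<std::size_t>(std::ceil(logical_cores * dt_read_thread_count_scale));
    return std::max<std::size_t>(count, 1);
}

// On a 16-core host: scale 1.0 (old default) -> 16 read threads,
//                    scale 2.0 (new default) -> 32 read threads.
```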

dbms/src/Storages/DeltaMerge/SegmentReadTaskPool.cpp (38 changes: 38 additions & 0 deletions)

@@ -165,7 +165,45 @@ BlockInputStreamPtr SegmentReadTaskPool::buildInputStream(SegmentReadTaskPtr & t
     return stream;
 }
 
+<<<<<<< HEAD
 void SegmentReadTaskPool::finishSegment(const SegmentPtr & seg)
+=======
+SegmentReadTaskPool::SegmentReadTaskPool(
+    int extra_table_id_index_,
+    const ColumnDefines & columns_to_read_,
+    const PushDownFilterPtr & filter_,
+    uint64_t max_version_,
+    size_t expected_block_size_,
+    ReadMode read_mode_,
+    SegmentReadTasks && tasks_,
+    AfterSegmentRead after_segment_read_,
+    const String & tracing_id,
+    bool enable_read_thread_,
+    Int64 num_streams_,
+    const String & res_group_name_)
+    : pool_id(nextPoolId())
+    , mem_tracker(current_memory_tracker == nullptr ? nullptr : current_memory_tracker->shared_from_this())
+    , extra_table_id_index(extra_table_id_index_)
+    , columns_to_read(columns_to_read_)
+    , filter(filter_)
+    , max_version(max_version_)
+    , expected_block_size(expected_block_size_)
+    , read_mode(read_mode_)
+    , tasks_wrapper(enable_read_thread_, std::move(tasks_))
+    , after_segment_read(after_segment_read_)
+    , log(Logger::get(tracing_id))
+    , unordered_input_stream_ref_count(0)
+    , exception_happened(false)
+    // If the queue is too short (only 1 in the extreme case), the computation thread
+    // may encounter empty queues frequently, resulting in too much waiting and thread
+    // context switching. We limit the length of the block queue to 1.5 times
+    // `num_streams_`; in the extreme case, when `num_streams_` is 1, `block_slot_limit`
+    // is at least 2.
+    , block_slot_limit(std::ceil(num_streams_ * 1.5))
+    // Limiting the minimum number of reading segments to 2 avoids, as much as possible,
+    // situations where computation is faster than the storage layer can keep up with.
+    , active_segment_limit(std::max(num_streams_, 2))
+    , res_group_name(res_group_name_)
+>>>>>>> 1614f4ad44 (Storages: Update the length of Block queue and the number of read threads (#8702))
 {
     after_segment_read(dm_context, seg);
     bool pool_finished = false;
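
The added constructor is the substance of the cherry-pick: it derives `block_slot_limit` and `active_segment_limit` from `num_streams_` as described in the initializer-list comments. Here is a self-contained sketch (plain C++, not TiFlash code) that evaluates both limits for a few stream counts:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
    for (int64_t num_streams : {1, 2, 4, 8})
    {
        // The block queue holds 1.5x the stream count, rounded up, so even a
        // single-stream query gets a queue of 2 rather than 1.
        const auto block_slot_limit = static_cast<int64_t>(std::ceil(num_streams * 1.5));
        // At least 2 segments are read concurrently, so the storage layer is
        // less likely to fall behind a faster computation layer.
        const auto active_segment_limit = std::max<int64_t>(num_streams, 2);
        std::cout << "num_streams=" << num_streams << " block_slot_limit=" << block_slot_limit
                  << " active_segment_limit=" << active_segment_limit << '\n';
    }
    return 0;
}
```

With one stream both limits come out to 2, matching the extreme case called out in the constructor's comments.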
