Commit

Merge branch 'master' into add-cases-for-ip
amorynan authored Oct 30, 2024
2 parents f127902 + 3c7fe3f commit face753
Showing 1,554 changed files with 53,336 additions and 20,423 deletions.
36 changes: 30 additions & 6 deletions .asf.yaml
@@ -70,16 +70,29 @@ github:
dismiss_stale_reviews: true
require_code_owner_reviews: true
required_approving_review_count: 1
branch-1.1-lts:

branch-3.0:
required_status_checks:
# if strict is true, means "Require branches to be up to date before merging".
strict: false
contexts:
- License Check

required_pull_request_reviews:
dismiss_stale_reviews: true
required_approving_review_count: 1
- Clang Formatter
- CheckStyle
- Build Broker
- ShellCheck
- Build Third Party Libraries (Linux)
- Build Third Party Libraries (macOS)
- FE UT (Doris FE UT)
- BE UT (Doris BE UT)
- Cloud UT (Doris Cloud UT)
- COMPILE (DORIS_COMPILE)
- P0 Regression (Doris Regression)
- External Regression (Doris External Regression)
- cloud_p0 (Doris Cloud Regression)
#required_pull_request_reviews:
# dismiss_stale_reviews: true
# required_approving_review_count: 1

branch-2.1:
required_status_checks:
@@ -124,6 +137,17 @@ github:
dismiss_stale_reviews: true
required_approving_review_count: 1

branch-1.1-lts:
required_status_checks:
# if strict is true, means "Require branches to be up to date before merging".
strict: false
contexts:
- License Check

required_pull_request_reviews:
dismiss_stale_reviews: true
required_approving_review_count: 1

collaborators:
- LemonLiTree
- Yukang-Lian
@@ -134,7 +158,7 @@ github:
- wm1581066
- KassieZ
- yujun777
- gavinchou
- doris-robot

notifications:
pullrequests_status: [email protected]
19 changes: 0 additions & 19 deletions .dlc.json

This file was deleted.

62 changes: 62 additions & 0 deletions .github/workflows/auto-cherry-pick.yml
@@ -0,0 +1,62 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
name: Auto Cherry-Pick to Branch

on:
pull_request_target:
types:
- closed
branches:
- master
permissions:
checks: write
contents: write
pull-requests: write
jobs:
auto_cherry_pick:
runs-on: ubuntu-latest
if: ${{ contains(github.event.pull_request.labels.*.name, 'dev/3.0.x') && github.event.pull_request.merged == true }}
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.x'

- name: Install dependencies
run: |
pip install PyGithub
- name: Check SHA
run: |
expected_sha="80b7c6087f2a3e4f4c7f035a52e8e7b05ce00f27aa5c1bd52179df685c912447f94a96145fd3204a3958d8ed9777de5a5183b120e99e0e95bbca0366d69b0ac0"
calculated_sha=$(sha512sum tools/auto-pick-script.py | awk '{ print $1 }')
if [ "$calculated_sha" != "$expected_sha" ]; then
echo "SHA mismatch! Expected: $expected_sha, but got: $calculated_sha"
exit 1
else
echo "SHA matches: $calculated_sha"
fi
- name: Auto cherry-pick
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO_NAME: ${{ github.repository }}
CONFLICT_LABEL: cherry-pick-conflict-in-3.0
run: |
python tools/auto-pick-script.py ${{ github.event.pull_request.number }} branch-3.0
37 changes: 0 additions & 37 deletions .github/workflows/deadlink-check.yml

This file was deleted.

7 changes: 5 additions & 2 deletions be/CMakeLists.txt
@@ -299,8 +299,6 @@ if (COMPILER_CLANG)
-Wno-implicit-float-conversion
-Wno-implicit-int-conversion
-Wno-sign-conversion
-Wno-missing-field-initializers
-Wno-unused-const-variable
-Wno-shorten-64-to-32)
if (USE_LIBCPP)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-stdlib=libc++>)
@@ -344,6 +342,10 @@ if (ENABLE_INJECTION_POINT)
set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DENABLE_INJECTION_POINT")
endif()

if (ENABLE_CACHE_LOCK_DEBUG)
set(CXX_COMMON_FLAGS "${CXX_COMMON_FLAGS} -DENABLE_CACHE_LOCK_DEBUG")
endif()

# Enable memory tracker, which allows BE to limit the memory of tasks such as query, load,
# and compaction,and observe the memory of BE through be_ip:http_port/MemTracker.
# Adding the option `USE_MEM_TRACKER=OFF sh build.sh` when compiling can turn off the memory tracker,
@@ -784,6 +786,7 @@ install(DIRECTORY DESTINATION ${OUTPUT_DIR}/conf)
install(FILES
${BASE_DIR}/../bin/start_be.sh
${BASE_DIR}/../bin/stop_be.sh
${BASE_DIR}/../tools/jeprof
PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE
GROUP_READ GROUP_WRITE GROUP_EXECUTE
WORLD_READ WORLD_EXECUTE
4 changes: 4 additions & 0 deletions be/src/cloud/cloud_full_compaction.cpp
@@ -195,6 +195,10 @@ Status CloudFullCompaction::modify_rowsets() {
compaction_job->set_num_output_rows(_output_rowset->num_rows());
compaction_job->set_size_input_rowsets(_input_rowsets_size);
compaction_job->set_size_output_rowsets(_output_rowset->data_disk_size());
DBUG_EXECUTE_IF("CloudFullCompaction::modify_rowsets.wrong_compaction_data_size", {
compaction_job->set_size_input_rowsets(1);
compaction_job->set_size_output_rowsets(10000001);
})
compaction_job->set_num_input_segments(_input_segments);
compaction_job->set_num_output_segments(_output_rowset->num_segments());
compaction_job->set_num_input_rowsets(_input_rowsets.size());
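DBUG_EXECUTE_IF is Doris's debug-point mechanism: the injected body runs only when the named point has been activated at runtime (debug points are off by default). A minimal sketch of the idea — the registry below is hypothetical, not Doris's real implementation, which enables points via config plus an HTTP endpoint:

    #include <set>
    #include <string>

    // Hypothetical registry of debug points activated at runtime.
    static std::set<std::string> g_active_debug_points;

    // Run `body` only when the named debug point is active; otherwise no-op.
    #define DBUG_EXECUTE_IF(name, body)                        \
        do {                                                   \
            if (g_active_debug_points.count(name) > 0) {       \
                body;                                          \
            }                                                  \
        } while (0)

Here the point "CloudFullCompaction::modify_rowsets.wrong_compaction_data_size" deliberately reports inconsistent rowset sizes (input 1, output 10000001) so regression tests can exercise the meta service's size-consistency check.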
1 change: 1 addition & 0 deletions be/src/cloud/cloud_meta_mgr.cpp
@@ -880,6 +880,7 @@ Status CloudMetaMgr::abort_txn(const StreamLoadContext& ctx) {
AbortTxnRequest req;
AbortTxnResponse res;
req.set_cloud_unique_id(config::cloud_unique_id);
req.set_reason(std::string(ctx.status.msg().substr(0, 1024)));
if (ctx.db_id > 0 && !ctx.label.empty()) {
req.set_db_id(ctx.db_id);
req.set_label(ctx.label);
15 changes: 8 additions & 7 deletions be/src/cloud/cloud_rowset_writer.cpp
@@ -115,13 +115,14 @@ Status CloudRowsetWriter::build(RowsetSharedPtr& rowset) {
} else {
_rowset_meta->add_segments_file_size(seg_file_size.value());
}

if (auto idx_files_info = _idx_files_info.get_inverted_files_info(_segment_start_id);
!idx_files_info.has_value()) [[unlikely]] {
LOG(ERROR) << "expected inverted index files info, but none presents: "
<< idx_files_info.error();
} else {
_rowset_meta->add_inverted_index_files_info(idx_files_info.value());
if (rowset_schema->has_inverted_index()) {
if (auto idx_files_info = _idx_files.inverted_index_file_info(_segment_start_id);
!idx_files_info.has_value()) [[unlikely]] {
LOG(ERROR) << "expected inverted index files info, but none presents: "
<< idx_files_info.error();
} else {
_rowset_meta->add_inverted_index_files_info(idx_files_info.value());
}
}

RETURN_NOT_OK_STATUS_WITH_WARN(RowsetFactory::create_rowset(rowset_schema, _context.tablet_path,
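The accessor here returns an expected-style value (checked with has_value()/error()), so a missing index-file list under an inverted-index schema is logged and skipped rather than failing the rowset build. A reduced sketch of that control flow using C++23 std::expected — the types and the ready flag are stand-ins, not the real Doris API:

    #include <expected>
    #include <iostream>
    #include <string>
    #include <vector>

    struct InvertedIndexFileInfo {};  // stand-in for the real metadata type

    // Hypothetical accessor shaped like _idx_files.inverted_index_file_info().
    std::expected<std::vector<InvertedIndexFileInfo>, std::string>
    inverted_index_file_info(bool ready) {
        if (!ready) return std::unexpected("no segment index files recorded");
        return std::vector<InvertedIndexFileInfo>(1);
    }

    int main() {
        if (auto info = inverted_index_file_info(false); !info.has_value()) {
            // Log and continue, mirroring the diff's error branch.
            std::cerr << "expected inverted index files info, but none presents: "
                      << info.error() << '\n';
        }
    }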
6 changes: 4 additions & 2 deletions be/src/cloud/cloud_storage_engine.cpp
@@ -558,14 +558,16 @@ std::vector<CloudTabletSPtr> CloudStorageEngine::_generate_cloud_compaction_task
} else if (config::enable_parallel_cumu_compaction) {
filter_out = [&tablet_preparing_cumu_compaction](CloudTablet* t) {
return tablet_preparing_cumu_compaction.contains(t->tablet_id()) ||
(t->tablet_state() != TABLET_RUNNING && t->alter_version() == -1);
(t->tablet_state() != TABLET_RUNNING &&
(!config::enable_new_tablet_do_compaction || t->alter_version() == -1));
};
} else {
filter_out = [&tablet_preparing_cumu_compaction,
&submitted_cumu_compactions](CloudTablet* t) {
return tablet_preparing_cumu_compaction.contains(t->tablet_id()) ||
submitted_cumu_compactions.contains(t->tablet_id()) ||
(t->tablet_state() != TABLET_RUNNING && t->alter_version() == -1);
(t->tablet_state() != TABLET_RUNNING &&
(!config::enable_new_tablet_do_compaction || t->alter_version() == -1));
};
}

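Both lambdas change the same way: previously any tablet not in TABLET_RUNNING with no alter version was skipped; now a non-running tablet stays eligible for cumulative compaction when enable_new_tablet_do_compaction is on and it carries an alter version (i.e., a tablet involved in schema change). Restated as a standalone predicate for readability — a sketch, with the tablet state flattened into booleans:

    #include <cstdint>

    // Return true to SKIP the tablet for cumulative compaction (sketch).
    bool filter_out(bool preparing_cumu, bool is_running,
                    bool enable_new_tablet_do_compaction, int64_t alter_version) {
        // Skip when a cumu compaction is already being prepared, or when the
        // tablet is not RUNNING and either the new-tablet switch is off or
        // the tablet has no alter version (alter_version == -1).
        return preparing_cumu ||
               (!is_running &&
                (!enable_new_tablet_do_compaction || alter_version == -1));
    }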
2 changes: 1 addition & 1 deletion be/src/cloud/cloud_tablet.cpp
@@ -433,7 +433,7 @@ void CloudTablet::recycle_cached_data(const std::vector<RowsetSharedPtr>& rowset
// TODO: Segment::file_cache_key
auto file_key = Segment::file_cache_key(rs->rowset_id().to_string(), seg_id);
auto* file_cache = io::FileCacheFactory::instance()->get_by_path(file_key);
file_cache->remove_if_cached(file_key);
file_cache->remove_if_cached_async(file_key);
}
}
}
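Switching remove_if_cached to remove_if_cached_async moves block eviction off the rowset-recycling path: keys are queued for the cache to clean up in the background instead of taking cache-internal locks inline for every segment (plausibly bounded by the file_cache_recycle_keys_size config this commit adds). A minimal sketch of the deferred-removal idea, with hypothetical types:

    #include <cstddef>
    #include <mutex>
    #include <queue>
    #include <string>

    // Sketch: callers enqueue keys; a cache-owned background thread drains
    // the queue and removes the cached blocks, so recycling never blocks on
    // cache-internal locks. The bound mirrors file_cache_recycle_keys_size.
    class FileCacheSketch {
    public:
        void remove_if_cached_async(const std::string& key) {
            std::lock_guard<std::mutex> guard(_mu);
            if (_pending.size() < kMaxPendingKeys) {
                _pending.push(key);
            }
        }

    private:
        static constexpr std::size_t kMaxPendingKeys = 1000000;
        std::mutex _mu;
        std::queue<std::string> _pending;  // drained by a background worker
    };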
9 changes: 9 additions & 0 deletions be/src/cloud/injection_point_action.cpp
@@ -108,6 +108,15 @@ void register_suites() {
sp->set_call_back("VOlapTableSink::close",
[](auto&&) { std::this_thread::sleep_for(std::chrono::seconds(5)); });
});
// curl be_ip:http_port/api/injection_point/apply_suite?name=test_ttl_lru_evict'
suite_map.emplace("test_ttl_lru_evict", [] {
auto* sp = SyncPoint::get_instance();
sp->set_call_back("BlockFileCache::change_limit1", [](auto&& args) {
LOG(INFO) << "BlockFileCache::change_limit1";
auto* limit = try_any_cast<size_t*>(args[0]);
*limit = 1;
});
});
suite_map.emplace("test_file_segment_cache_corruption", [] {
auto* sp = SyncPoint::get_instance();
sp->set_call_back("Segment::open:corruption", [](auto&& args) {
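The new test_ttl_lru_evict suite hooks the BlockFileCache::change_limit1 sync point and rewrites the cache limit to 1, so TTL/LRU eviction can be forced in regression tests without filling a real cache. For context, the production side of such a hook looks roughly like this — a sketch in the sync-point style Doris borrows from RocksDB; the helpers are hypothetical:

    // Sketch of the instrumented call site inside the cache's resize path.
    size_t new_limit = compute_new_cache_limit();  // hypothetical helper
    // The suite's callback receives &new_limit via try_any_cast<size_t*>
    // and overwrites it with 1.
    TEST_SYNC_POINT_CALLBACK("BlockFileCache::change_limit1", &new_limit);
    apply_cache_limit(new_limit);  // hypothetical helper; 1 under the suite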
2 changes: 1 addition & 1 deletion be/src/clucene
6 changes: 3 additions & 3 deletions be/src/common/compile_check_begin.h
@@ -15,10 +15,10 @@
// specific language governing permissions and limitations
// under the License.

#pragma once

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic error "-Wshorten-64-to-32"
#pragma clang diagnostic error "-Wconversion"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wfloat-conversion"
#endif
//#include "common/compile_check_begin.h"
2 changes: 0 additions & 2 deletions be/src/common/compile_check_end.h
@@ -15,8 +15,6 @@
// specific language governing permissions and limitations
// under the License.

#pragma once

#ifdef __clang__
#pragma clang diagnostic pop
#endif
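Taken together, the two headers now escalate all of clang's -Wconversion to an error between the begin/end includes (with sign- and float-conversion explicitly exempted), rather than only -Wshorten-64-to-32, and both drop #pragma once — necessary because the push/pop pair must take effect on every inclusion, not just the first. Inside a guarded region, implicit narrowing fails to compile; for example:

    #include <cstdint>

    #include "common/compile_check_begin.h"

    int32_t checked_narrow(int64_t n) {
        // return n;                     // error: implicit int64_t -> int32_t
        //                               // (-Wshorten-64-to-32, under -Wconversion)
        return static_cast<int32_t>(n);  // OK: the narrowing is explicit
    }

    #include "common/compile_check_end.h"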
19 changes: 12 additions & 7 deletions be/src/common/config.cpp
@@ -396,7 +396,7 @@ DEFINE_mInt64(base_compaction_max_compaction_score, "20");
DEFINE_mDouble(base_compaction_min_data_ratio, "0.3");
DEFINE_mInt64(base_compaction_dup_key_max_file_size_mbytes, "1024");

DEFINE_Bool(enable_skip_tablet_compaction, "true");
DEFINE_Bool(enable_skip_tablet_compaction, "false");
// output rowset of cumulative compaction total disk size exceed this config size,
// this rowset will be given to base compaction, unit is m byte.
DEFINE_mInt64(compaction_promotion_size_mbytes, "1024");
@@ -901,7 +901,7 @@ DEFINE_mInt32(orc_natural_read_size_mb, "8");
DEFINE_mInt64(big_column_size_buffer, "65535");
DEFINE_mInt64(small_column_size_buffer, "100");

// rf will decide whether the next sampling_frequency blocks need to be filtered based on the filtering rate of the current block.
// Perform the always_true check at intervals determined by runtime_filter_sampling_frequency
DEFINE_mInt32(runtime_filter_sampling_frequency, "64");

// cooldown task configs
@@ -925,6 +925,9 @@ DEFINE_mBool(enable_query_like_bloom_filter, "true");
DEFINE_Int32(doris_remote_scanner_thread_pool_thread_num, "48");
// number of s3 scanner thread pool queue size
DEFINE_Int32(doris_remote_scanner_thread_pool_queue_size, "102400");
DEFINE_mInt64(block_cache_wait_timeout_ms, "1000");
DEFINE_mInt64(cache_lock_long_tail_threshold, "1000");
DEFINE_Int64(file_cache_recycle_keys_size, "1000000");

// limit the queue of pending batches which will be sent by a single nodechannel
DEFINE_mInt64(nodechannel_pending_queue_max_bytes, "67108864");
@@ -1022,7 +1025,7 @@ DEFINE_mInt32(inverted_index_cache_stale_sweep_time_sec, "600");
// inverted index searcher cache size
DEFINE_String(inverted_index_searcher_cache_limit, "10%");
DEFINE_Bool(enable_inverted_index_cache_check_timestamp, "true");
DEFINE_Int32(inverted_index_fd_number_limit_percent, "40"); // 40%
DEFINE_Int32(inverted_index_fd_number_limit_percent, "20"); // 20%
DEFINE_Int32(inverted_index_query_cache_shards, "256");

// inverted index match bitmap cache size
Expand All @@ -1039,7 +1042,7 @@ DEFINE_Int32(inverted_index_read_buffer_size, "4096");
// tree depth for bkd index
DEFINE_Int32(max_depth_in_bkd_tree, "32");
// index compaction
DEFINE_mBool(inverted_index_compaction_enable, "false");
DEFINE_mBool(inverted_index_compaction_enable, "true");
// Only for debug, do not use in production
DEFINE_mBool(debug_inverted_index_compaction, "false");
// index by RAM directory
@@ -1071,9 +1074,9 @@ DEFINE_mInt32(schema_cache_sweep_time_sec, "100");

// max number of segment cache, default -1 for backward compatibility fd_number*2/5
DEFINE_Int32(segment_cache_capacity, "-1");
DEFINE_Int32(segment_cache_fd_percentage, "40");
DEFINE_mInt32(estimated_mem_per_column_reader, "1024");
DEFINE_Int32(segment_cache_memory_percentage, "2");
DEFINE_Int32(segment_cache_fd_percentage, "20");
DEFINE_mInt32(estimated_mem_per_column_reader, "512");
DEFINE_Int32(segment_cache_memory_percentage, "5");

// enable feature binlog, default false
DEFINE_Bool(enable_feature_binlog, "false");
@@ -1348,6 +1351,8 @@ DEFINE_mInt32(lz4_compression_block_size, "262144");

DEFINE_mBool(enable_pipeline_task_leakage_detect, "false");

DEFINE_mInt32(check_score_rounds_num, "1000");

DEFINE_Int32(query_cache_size, "512");

// clang-format off
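The DEFINE_m* variants declare configs that are mutable at runtime (changeable through BE's update-config HTTP endpoint without a restart), while plain DEFINE_* values are fixed at startup; call sites simply read the current value from the config namespace. A sketch of the consumption pattern for one of the entries above — the surrounding function is illustrative, not a real Doris call site:

    #include <cstdint>

    #include "common/config.h"

    // Run the runtime filter's always_true check once every
    // runtime_filter_sampling_frequency blocks (sampling-pattern sketch).
    bool should_check_always_true(int64_t block_ordinal) {
        return block_ordinal % config::runtime_filter_sampling_frequency == 0;
    }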