diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9cda425b08..85d1d2b8fe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -191,7 +191,7 @@ endif()
 # We are using clang from llvm toolchain as default compiler as well as clang-format and clang-tidy
 # It is possible to build taraxa-node also with other C++ compilers but to contribute to the official repo,
 # changes must pass clang-format/clang-tidy checks for which we internally use llvm version=LLVM_VERSION
-set(LLVM_VERSION "18")
+set(LLVM_VERSION "17")

 # clang-tidy
 include(CMakeModules/clang_tidy.cmake)
@@ -202,7 +202,7 @@ include(CMakeModules/clang_format.cmake)
 # cppcheck
 include(CMakeModules/cppcheck.cmake)

-add_custom_target(check-static DEPENDS cpp-check)
+add_custom_target(check-static DEPENDS cpp-check clang-format clang-format-check)

 # execute command to get git info
 include(CMakeModules/git_info.cmake)
diff --git a/CMakeModules/clang_format.cmake b/CMakeModules/clang_format.cmake
index f2963c9d4e..1da82aa815 100644
--- a/CMakeModules/clang_format.cmake
+++ b/CMakeModules/clang_format.cmake
@@ -19,7 +19,7 @@ else ()
                       ${CMAKE_CURRENT_SOURCE_DIR}/tests/*.[ch]pp)

   # TODO: print error/line
-  add_custom_target(clang-format-check
+  add_custom_target(clang-format-check ALL
                     COMMAND ! ${CLANG_FORMAT_EXE}
                     -style=file -fallback-style=none --output-replacements-xml
                     ${ALL_SOURCE_FILES}
diff --git a/Dockerfile b/Dockerfile
index d93298087d..c474220aea 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,7 +6,7 @@ ARG BUILD_OUTPUT_DIR=cmake-docker-build-debug
 FROM ubuntu:24.04@sha256:e3f92abc0967a6c19d0dfa2d55838833e947b9d74edbcb0113e48535ad4be12a as builder

 # deps versions
-ARG LLVM_VERSION=18
+ARG LLVM_VERSION=17

 # Avoid prompts from apt
 ENV DEBIAN_FRONTEND=noninteractive
@@ -70,7 +70,6 @@ RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++
 # Install conan
 RUN apt-get remove -y python3-distro
 RUN pip3 install conan==1.64.1 --break-system-packages
-COPY CMakeModules/settings.yml /root/.conan/settings.yml

 # Install conan deps
 WORKDIR /opt/taraxa/
diff --git a/doc/building.md b/doc/building.md
index 7b38a6edd4..7af1fedd94 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -20,8 +20,9 @@ will build out of the box without further effort:
     autoconf \
     ccache \
     cmake \
-    clang-format-18 \
-    clang-tidy-18 \
+    clang-format-17 \
+    clang-tidy-17 \
+    llvm-17 \
     golang-go \
     python3-full \
     # this libs are required for arm build by go part. you can skip it for amd64 build
@@ -41,8 +42,8 @@ will build out of the box without further effort:
     sudo python3 -m pip install conan==1.64.1

     # Setup clang as default compiler either in your IDE or by env. variables"
-    export CC="clang-18"
-    export CXX="clang++-18"
+    export CC="clang-17"
+    export CXX="clang++-17"

 ### Clone the Repository

@@ -55,10 +56,14 @@ will build out of the box without further effort:
     # Optional - one time action
     # Create clang profile
     # It is recommended to use clang because on other compilers you could face some errors
-    conan profile new clang --detect
-
-    # Copy custom settings
-    cp ./CMakeModules/settings.yml ~/.conan/settings.yml
+    conan profile new clang --detect \
+    && conan profile update settings.compiler=clang clang \
+    && conan profile update settings.compiler.version=17 clang \
+    && conan profile update settings.compiler.libcxx=libstdc++11 clang \
+    && conan profile update settings.build_type=RelWithDebInfo clang \
+    && conan profile update env.CC=clang-17 clang \
+    && conan profile update env.CXX=clang++-17 clang \
+    && conan install --build missing -pr=clang .

     # Compile project using cmake
     mkdir cmake-build
diff --git a/libraries/aleth/libp2p/ENR.cpp b/libraries/aleth/libp2p/ENR.cpp
index b5b148c12b..f2b8923ae7 100644
--- a/libraries/aleth/libp2p/ENR.cpp
+++ b/libraries/aleth/libp2p/ENR.cpp
@@ -204,7 +204,8 @@ PublicCompressed IdentitySchemeV4::publicKey(ENR const& _enr) {
 }

 std::ostream& operator<<(std::ostream& _out, ENR const& _enr) {
-  _out << "[ seq=" << _enr.sequenceNumber() << " " << "id=" << _enr.id() << " ";
+  _out << "[ seq=" << _enr.sequenceNumber() << " "
+       << "id=" << _enr.id() << " ";

   try {
     auto const pubKey = IdentitySchemeV4::publicKey(_enr);
diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp
index 03bb2c7e59..1e773acf73 100644
--- a/libraries/aleth/libp2p/Host.cpp
+++ b/libraries/aleth/libp2p/Host.cpp
@@ -137,7 +137,8 @@ Host::~Host() {
     }
   }
   // We need to poll both as strand_ is ioc_
-  while (0 < session_ioc_.poll() + ioc_.poll());
+  while (0 < session_ioc_.poll() + ioc_.poll())
+    ;
   save_state();
   ioc_.restart();

diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h
index 52e70e6e92..fb61806d7d 100644
--- a/libraries/aleth/libp2p/NodeTable.h
+++ b/libraries/aleth/libp2p/NodeTable.h
@@ -416,8 +416,9 @@ struct NodeEntry {
 };

 inline std::ostream& operator<<(std::ostream& _out, NodeTable const& _nodeTable) {
-  _out << _nodeTable.m_hostNodeID << "\t" << "0\t" << _nodeTable.m_hostNodeEndpoint.address() << ":"
-       << _nodeTable.m_hostNodeEndpoint.udpPort() << std::endl;
+  _out << _nodeTable.m_hostNodeID << "\t"
+       << "0\t" << _nodeTable.m_hostNodeEndpoint.address() << ":" << _nodeTable.m_hostNodeEndpoint.udpPort()
+       << std::endl;
   auto s = _nodeTable.snapshot();
   for (auto n : s) _out << n.id() << "\t" << n.distance << "\t" << n.endpoint() << "\n";
   return _out;
diff --git a/libraries/common/include/common/encoding_rlp.hpp b/libraries/common/include/common/encoding_rlp.hpp
index cfef0697bc..90cb6918ee 100644
--- a/libraries/common/include/common/encoding_rlp.hpp
+++ b/libraries/common/include/common/encoding_rlp.hpp
@@ -67,8 +67,8 @@ void rlp(RLPEncoderRef encoding, std::pair const& target) {
 }

 template 
-auto rlp(RLPEncoderRef encoding, Sequence const& target) -> decltype(target.size(), target.begin(), target.end(),
-                                                                     void()) {
+auto rlp(RLPEncoderRef encoding, Sequence const& target)
+    -> decltype(target.size(), target.begin(), target.end(), void()) {
   encoding.appendList(target.size());
   for (auto const& v : target) {
     rlp(encoding, v);
diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp
index 78b6642f17..60b6119c6d 100644
--- a/libraries/config/src/hardfork.cpp
+++ b/libraries/config/src/hardfork.cpp
@@ -53,7 +53,8 @@ RLP_FIELDS_DEFINE(AspenHardfork, block_num_part_one, block_num_part_two, max_sup

 bool FicusHardforkConfig::isFicusHardfork(taraxa::PbftPeriod period) const { return period >= block_num; }

 bool FicusHardforkConfig::isPillarBlockPeriod(taraxa::PbftPeriod period, bool skip_first_pillar_block) const {
-  return period >= block_num && period >= firstPillarBlockPeriod() + (skip_first_pillar_block ? 1 : 0) * pillar_blocks_interval &&
+  return period >= block_num &&
+         period >= firstPillarBlockPeriod() + (skip_first_pillar_block ? 1 : 0) * pillar_blocks_interval &&
          period % pillar_blocks_interval == 0;
 }
diff --git a/libraries/core_libs/consensus/include/dag/dag.hpp b/libraries/core_libs/consensus/include/dag/dag.hpp
index 9fadacd629..9f10ac9fa1 100644
--- a/libraries/core_libs/consensus/include/dag/dag.hpp
+++ b/libraries/core_libs/consensus/include/dag/dag.hpp
@@ -130,7 +130,8 @@ class vertex_label_writer {
   vertex_label_writer(Property1 name) : name(name) {}
   template 
   void operator()(std::ostream &out, const Vertex &v) const {
-    out << "[label=\"" << name[v].toString().substr(0, 8) << " " << "\"]";
+    out << "[label=\"" << name[v].toString().substr(0, 8) << " "
+        << "\"]";
   }

  private:
diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp
index 4d593ab0b7..715917290b 100644
--- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp
+++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp
@@ -229,8 +229,9 @@ class FinalChainImpl final : public FinalChain {
     for (const auto& r : exec_results) {
       LogEntries logs;
       logs.reserve(r.logs.size());
-      std::transform(r.logs.cbegin(), r.logs.cend(), std::back_inserter(logs),
-                     [](const auto& l) { return LogEntry{l.address, l.topics, l.data}; });
+      std::transform(r.logs.cbegin(), r.logs.cend(), std::back_inserter(logs), [](const auto& l) {
+        return LogEntry{l.address, l.topics, l.data};
+      });
       transactions_gas_used.push_back(r.gas_used);
       receipts.emplace_back(TransactionReceipt{
           r.code_err.empty() && r.consensus_err.empty(),
diff --git a/libraries/core_libs/consensus/src/final_chain/trie_common.cpp b/libraries/core_libs/consensus/src/final_chain/trie_common.cpp
index 3c6ff1b963..3c3a33d519 100644
--- a/libraries/core_libs/consensus/src/final_chain/trie_common.cpp
+++ b/libraries/core_libs/consensus/src/final_chain/trie_common.cpp
@@ -55,7 +55,8 @@ void hash256rlp(HexMap const& _s, HexMap::const_iterator _begin, HexMap::const_i
     for (auto i = std::next(_begin); i != _end && sharedPre; ++i, ++c) {
       unsigned x = std::min(sharedPre, std::min((unsigned)_begin->first.size(), (unsigned)i->first.size()));
       unsigned shared = _preLen;
-      for (; shared < x && _begin->first[shared] == i->first[shared]; ++shared);
+      for (; shared < x && _begin->first[shared] == i->first[shared]; ++shared)
+        ;
       sharedPre = std::min(shared, sharedPre);
     }
     if (sharedPre > _preLen) {
@@ -71,7 +72,8 @@ void hash256rlp(HexMap const& _s, HexMap::const_iterator _begin, HexMap::const_i
     }
     for (auto i = 0; i < 16; ++i) {
       auto n = b;
-      for (; n != _end && n->first[_preLen] == i; ++n);
+      for (; n != _end && n->first[_preLen] == i; ++n)
+        ;
       if (b == n) {
         _rlp << "";
       } else {
diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
index 60df06b9ff..12267610b0 100644
--- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
+++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp
@@ -1999,8 +1999,8 @@ bool PbftManager::validatePbftBlockPillarVotes(const PeriodData &period_data) co

   const auto current_pillar_block = pillar_chain_mgr_->getCurrentPillarBlock();
   if (current_pillar_block->getPeriod() != required_votes_period) {
-    LOG(log_er_) << "Sync pillar votes required period " << required_votes_period
-                 << " != " << " current pillar block period " << current_pillar_block->getPeriod();
+    LOG(log_er_) << "Sync pillar votes required period " << required_votes_period << " != "
+                 << " current pillar block period " << current_pillar_block->getPeriod();
     return false;
   }

diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp
index c7787765be..0ce0835c93 100644
--- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp
+++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp
@@ -416,8 +416,9 @@ std::pair> VoteManager::isUniqueVote(const std::
   }

   std::stringstream err;
-  err << "Non unique vote: " << ", new vote hash (voted value): " << vote->getHash().abridged() << " ("
-      << vote->getBlockHash().abridged() << ")"
+  err << "Non unique vote: "
+      << ", new vote hash (voted value): " << vote->getHash().abridged() << " (" << vote->getBlockHash().abridged()
+      << ")"
       << ", orig. vote hash (voted value): " << found_voter_it->second.first->getHash().abridged() << " ("
       << found_voter_it->second.first->getBlockHash().abridged() << ")";
   if (found_voter_it->second.second != nullptr) {
@@ -482,8 +483,8 @@ std::pair> VoteManager::insertUniqueVote(const s
   }

   std::stringstream err;
-  err << "Unable to insert new unique vote(race condition): " << ", new vote hash (voted value): "
-      << vote->getHash().abridged() << " (" << vote->getBlockHash() << ")"
+  err << "Unable to insert new unique vote(race condition): "
+      << ", new vote hash (voted value): " << vote->getHash().abridged() << " (" << vote->getBlockHash() << ")"
       << ", orig. vote hash (voted value): " << inserted_vote.first->second.first->getHash().abridged() << " ("
       << inserted_vote.first->second.first->getBlockHash() << ")";
   if (inserted_vote.first->second.second != nullptr) {
diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp
index e690a77243..9091c68571 100644
--- a/libraries/core_libs/network/src/network.cpp
+++ b/libraries/core_libs/network/src/network.cpp
@@ -106,7 +106,10 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi
   registerPeriodicEvents(pbft_mgr, trx_mgr);

   for (uint i = 0; i < tp_.capacity(); ++i) {
-    tp_.post_loop({100 + i * 20}, [this] { while (0 < host_->do_work()); });
+    tp_.post_loop({100 + i * 20}, [this] {
+      while (0 < host_->do_work())
+        ;
+    });
   }

   LOG(log_nf_) << "Configured host. Listening on address: " << config.network.listen_ip << ":"
diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp
index 71eecf31e1..855df89d43 100644
--- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp
+++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp
@@ -82,9 +82,9 @@ void StatusPacketHandler::process(const threadpool::PacketData& packet_data, con
     if (pbft_synced_period + node_history < peer_pbft_chain_size) {
       LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_)
           << "Light node " << packet_data.from_node_id_.abridged()
-          << " would not be able to serve our syncing request. " << "Current synced period " << pbft_synced_period
-          << ", peer synced period " << pbft_synced_period << ", peer light node history " << node_history
-          << ". Peer will be disconnected";
+          << " would not be able to serve our syncing request. "
+          << "Current synced period " << pbft_synced_period << ", peer synced period " << pbft_synced_period
+          << ", peer light node history " << node_history << ". Peer will be disconnected";
       disconnect(packet_data.from_node_id_, dev::p2p::UserReason);
       return;
     }
diff --git a/libraries/core_libs/network/src/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/threadpool/priority_queue.cpp
index 8e71f7f585..8311626ddb 100644
--- a/libraries/core_libs/network/src/threadpool/priority_queue.cpp
+++ b/libraries/core_libs/network/src/threadpool/priority_queue.cpp
@@ -24,7 +24,8 @@ PriorityQueue::PriorityQueue(size_t tp_workers_count, const addr_t& node_addr)
   packets_queues_[PacketData::PacketPriority::Mid].setMaxWorkersCount(mid_priority_queue_workers);
   packets_queues_[PacketData::PacketPriority::Low].setMaxWorkersCount(low_priority_queue_workers);

-  LOG(log_nf_) << "Priority queues initialized accordingly: " << "total num of workers = " << MAX_TOTAL_WORKERS_COUNT
+  LOG(log_nf_) << "Priority queues initialized accordingly: "
+               << "total num of workers = " << MAX_TOTAL_WORKERS_COUNT
                << ", High priority packets max num of workers = " << high_priority_queue_workers
                << ", Mid priority packets max num of workers = " << mid_priority_queue_workers
                << ", Low priority packets max num of workers = " << low_priority_queue_workers;
diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp
index 726dcd1648..4cf505ee25 100644
--- a/tests/final_chain_test.cpp
+++ b/tests/final_chain_test.cpp
@@ -103,7 +103,8 @@ struct FinalChainTest : WithDataDir {
     EXPECT_EQ(blk_h.timestamp, pbft_block->getTimestamp());
     EXPECT_EQ(receipts.size(), trxs.size());
     EXPECT_EQ(blk_h.transactions_root,
-              trieRootOver(trxs.size(), [&](auto i) { return dev::rlp(i); }, [&](auto i) { return trxs[i]->rlp(); }));
+              trieRootOver(
+                  trxs.size(), [&](auto i) { return dev::rlp(i); }, [&](auto i) { return trxs[i]->rlp(); }));
     EXPECT_EQ(blk_h.receipts_root,
               trieRootOver(
                   trxs.size(), [&](auto i) { return dev::rlp(i); }, [&](auto i) { return util::rlp_enc(receipts[i]); }));
diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp
index edc96f0777..1f84dbdbfb 100644
--- a/tests/p2p_test.cpp
+++ b/tests/p2p_test.cpp
@@ -105,8 +105,8 @@ TEST_F(P2PTest, multiple_capabilities) {
   dev::p2p::NetworkConfig net_conf("127.0.0.1", 20001, false, true);
   TaraxaNetworkConfig taraxa_net_conf;
   taraxa_net_conf.is_boot_node = true;
-  auto boot_node =
-      Host::make("TaraxaNode", [](auto /*host*/) { return Host::CapabilityList{}; }, key, net_conf, taraxa_net_conf);
+  auto boot_node = Host::make(
+      "TaraxaNode", [](auto /*host*/) { return Host::CapabilityList{}; }, key, net_conf, taraxa_net_conf);
   const auto &boot_node_key = boot_node->id();
   util::ThreadPool boot_node_tp;