// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <net_processing.h>
#include <addrman.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <chainparams.h>
#include <consensus/validation.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <index/txindex.h>
#include <merkleblock.h>
#include <net_types.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <node/blockstorage.h>
#include <policy/policy.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/system.h>
#include <validation.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <list>
#include <memory>
#include <optional>
#include <typeinfo>
#include <spork.h>
#include <governance/governance.h>
#include <masternode/sync.h>
#include <masternode/meta.h>
#ifdef ENABLE_WALLET
#include <coinjoin/client.h>
#endif // ENABLE_WALLET
#include <coinjoin/context.h>
#include <coinjoin/server.h>
#include <evo/deterministicmns.h>
#include <evo/mnauth.h>
#include <evo/simplifiedmns.h>
#include <llmq/blockprocessor.h>
#include <llmq/chainlocks.h>
#include <llmq/commitment.h>
#include <llmq/context.h>
#include <llmq/dkgsessionmgr.h>
#include <llmq/instantsend.h>
#include <llmq/options.h>
#include <llmq/quorums.h>
#include <llmq/signing.h>
#include <llmq/signing_shares.h>
#include <llmq/snapshot.h>
#include <statsd_client.h>
/** Maximum number of in-flight objects from a peer */
static constexpr int32_t MAX_PEER_OBJECT_IN_FLIGHT = 100;
/** Maximum number of announced objects from a peer */
static constexpr int32_t MAX_PEER_OBJECT_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
/** How many microseconds to delay requesting transactions from inbound peers */
static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{std::chrono::seconds{2}};
/** How long to wait (in microseconds) before downloading a transaction from an additional peer */
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
/** Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others. */
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{std::chrono::seconds{2}};
/** How long to wait before expiring an in-flight getdata request to a peer: GETDATA_TX_INTERVAL multiplied by this factor */
static constexpr int64_t TX_EXPIRY_INTERVAL_FACTOR = 10;
static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
"To preserve security, MAX_GETDATA_RANDOM_DELAY should not exceed INBOUND_PEER_DELAY");
/** Limit to avoid sending big packets. Not enforced when processing incoming GETDATA, for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Expiration time for orphan transactions in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
/** Minimum time between orphan transactions expire time checks in seconds */
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
/** How long to cache transactions in mapRelay for normal relay */
static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
/** Headers download timeout.
* Timeout = base + per_header * (expected number of headers) */
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
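// Worked example of the formula above (the header count is purely illustrative):
// syncing 2'000'000 headers yields 15min + 1ms * 2'000'000 = 15min + 2000s ≈ 48.3min.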
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
/** How frequently to check for stale tips */
static constexpr auto STALE_CHECK_INTERVAL{150s}; // 2.5 minutes (~block interval)
/** How frequently to check for extra outbound peers and disconnect */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */
static constexpr std::chrono::seconds MINIMUM_CONNECT_TIME{30};
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
/// protection against fingerprinting. Set to one month, denominated in seconds.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// Age after which a block is considered historical for purposes of rate
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive */
static constexpr std::chrono::minutes PING_INTERVAL{2};
/** The maximum number of entries in a locator */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Time during which a peer must stall block download progress before being disconnected. */
static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
* when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
/** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */
static const int MAX_BLOCKTXN_DEPTH = 10;
/** Size of the "block download window": how far ahead of our current height do we fetch?
* Larger windows tolerate larger download speed differences between peers, but increase the potential
* degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably
* want to make this a per-peer adaptive value at some point. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
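// Illustrative arithmetic, taking the 10 min interval noted above at face value:
// with two peers downloading in parallel, the timeout is (1 + 0.5 * 2) * 10min = 20min.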
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
/** Average delay between peer address broadcasts */
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
/** Average delay between trickled inventory transmissions for inbound peers.
* Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
/** Average delay between trickled inventory transmissions for outbound peers.
* Use a smaller delay as there is less privacy concern for them.
* Blocks and peers with NetPermissionFlags::NoBan permission bypass this.
* Masternode outbound peers get half this delay. */
static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL = 2s;
/** Maximum rate of inventory items to send per second.
* Limits the impact of low-fee transaction floods.
* Dash's block time is 4 times shorter, so we need to push 4 times more invs per 1MB block. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK = 4 * INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
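// Spelled out with the constants above: 4 * 7 * count_seconds(5s) = 140 inventory
// items per inbound trickle. A compile-time sanity check of that sketch:
static_assert(INVENTORY_BROADCAST_MAX_PER_1MB_BLOCK == 140, "update the arithmetic sketch above if the broadcast constants change");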
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically
* relayed before unconditional relay from the mempool kicks in. This is only a
* lower bound, and it should be larger to account for higher inv rate to outbound
* peers, and random variations in the broadcast mechanism. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_MAX_RECENT_RELAY too low");
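// In numbers: 7 inv/s * count_seconds(UNCONDITIONAL_RELAY_DELAY) = 7 * 120 = 840, well below 3500.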
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
/** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
/** The maximum number of address records permitted in an ADDR message. */
static constexpr size_t MAX_ADDR_TO_SEND{1000};
/** The maximum rate of address records we're willing to process on average. Can be bypassed using
* the NetPermissionFlags::Addr permission. */
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
/** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND
* based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR
* is exempt from this limit). */
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
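// A minimal sketch of the token-bucket update these constants imply (a sketch
// only; the real logic lives in the ADDR handler further down in this file):
//   m_addr_token_bucket += elapsed_seconds * MAX_ADDR_RATE_PER_SECOND;
//   m_addr_token_bucket = std::min<double>(m_addr_token_bucket, MAX_ADDR_PROCESSING_TOKEN_BUCKET);
// ...and each accepted address then costs one token.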
/** The compactblocks version we support. See BIP 152. */
static constexpr uint64_t CMPCTBLOCKS_VERSION{1};
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
CTransactionRef tx;
NodeId fromPeer;
int64_t nTimeExpire;
size_t list_pos;
size_t nTxSize;
};
/** Guards orphan transactions and extra txs for compact blocks */
RecursiveMutex g_cs_orphans;
/** Map from txid to orphan transaction record. Limited by
* -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
size_t nMapOrphanTransactionsSize = 0;
void EraseOrphansFor(NodeId peer);
// Internal stuff
namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
uint256 hash;
const CBlockIndex* pindex; //!< Optional.
bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
};
/**
* Data structure for an individual peer. This struct is not protected by
* cs_main since it does not contain validation-critical data.
*
* Memory is owned by shared pointers and this object is destructed when
* the refcount drops to zero.
*
* Mutexes inside this struct must not be held when locking m_peer_mutex.
*
* TODO: move most members from CNodeState to this structure.
* TODO: move remaining application-layer data members from CNode to this structure.
*/
struct Peer {
/** Same id as the CNode object for this peer */
const NodeId m_id{0};
/** Protects misbehavior data members */
Mutex m_misbehavior_mutex;
/** Accumulated misbehavior score for this peer */
int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
/** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
/** Protects block inventory data members */
Mutex m_block_inv_mutex;
/** List of blocks that we'll announce via an `inv` message.
* There is no final sorting before sending, as they are always sent
* immediately and in the order requested. */
std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
/** Unfiltered list of blocks that we'd like to announce via a `headers`
* message. If we can't announce via a `headers` message, we'll fall back to
* announcing via `inv`. */
std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
/** The final block hash that we sent in an `inv` message to this peer.
* When the peer requests this block, we send an `inv` message to trigger
* the peer to request the next sequence of block hashes.
* Most peers use headers-first syncing, which doesn't use this mechanism */
uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {};
/** This peer's reported block height when we connected */
std::atomic<int> m_starting_height{-1};
/** The pong reply we're expecting, or 0 if no pong expected. */
std::atomic<uint64_t> m_ping_nonce_sent{0};
/** When the last ping was sent, or 0 if no ping was ever sent */
std::atomic<std::chrono::microseconds> m_ping_start{0us};
/** Whether a ping has been requested by the user */
std::atomic<bool> m_ping_queued{false};
struct TxRelay {
mutable RecursiveMutex m_bloom_filter_mutex;
// We use m_relay_txs for two purposes -
// a) it allows us to not relay tx invs before receiving the peer's version message
// b) the peer may tell us in its version message that we should not relay tx invs
// unless it loads a bloom filter.
bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
mutable RecursiveMutex m_tx_inventory_mutex;
// inventory based relay
CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
// Set of transaction ids we still have to announce.
// They are sorted by the mempool before relay, so the order is not important.
std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
// List of non-tx/non-block inventory items
std::vector<CInv> vInventoryOtherToSend GUARDED_BY(m_tx_inventory_mutex);
// Used for BIP35 mempool sending, also protected by m_tx_inventory_mutex
bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
// Last time a "MEMPOOL" request was serviced.
std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
std::chrono::microseconds m_next_inv_send_time{0};
};
// in bitcoin: m_tx_relay == nullptr if we're not relaying transactions with this peer
// in dash: m_tx_relay should never be nullptr; we don't relay transactions when
// `IsBlockOnlyConn() == true` instead
std::unique_ptr<TxRelay> m_tx_relay{std::make_unique<TxRelay>()};
/** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
std::vector<CAddress> m_addrs_to_send;
/** Probabilistic filter to track recent addr messages relayed with this
* peer. Used to avoid relaying redundant addresses to this peer.
*
* We initialize this filter for outbound peers (other than
* block-relay-only connections) or when an inbound peer sends us an
* address related message (ADDR, ADDRV2, GETADDR).
*
* Presence of this filter must correlate with m_addr_relay_enabled.
**/
std::unique_ptr<CRollingBloomFilter> m_addr_known;
/** Whether we are participating in address relay with this connection.
*
* We set this bool to true for outbound peers (other than
* block-relay-only connections), or when an inbound peer sends us an
* address related message (ADDR, ADDRV2, GETADDR).
*
* We use this bool to decide whether a peer is eligible for gossiping
* addr messages. This avoids relaying to peers that are unlikely to
* forward them, effectively blackholing self announcements. Reasons
* peers might support addr relay on the link include that they connected
* to us as a block-relay-only peer or they are a light client.
*
* This field must correlate with whether m_addr_known has been
* initialized.*/
std::atomic_bool m_addr_relay_enabled{false};
/** Whether a Peer can only be relayed blocks */
const bool m_block_relay_only{false};
/** Whether a getaddr request to this peer is outstanding. */
bool m_getaddr_sent{false};
/** Guards address sending timers. */
mutable Mutex m_addr_send_times_mutex;
/** Time point to send the next ADDR message to this peer. */
std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/** Time point to possibly re-announce our local address to this peer. */
std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
/** Whether the peer has signaled support for receiving ADDRv2 (BIP155)
* messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */
std::atomic_bool m_wants_addrv2{false};
/** Whether this peer has already sent us a getaddr message. */
bool m_getaddr_recvd{false};
/** Number of addresses that can be processed from this peer. Start at 1 to
* permit self-announcement. */
double m_addr_token_bucket{1.0};
/** When m_addr_token_bucket was last updated */
std::chrono::microseconds m_addr_token_timestamp{GetTime<std::chrono::microseconds>()};
/** Total number of addresses that were dropped due to rate limiting. */
std::atomic<uint64_t> m_addr_rate_limited{0};
/** Total number of addresses that were processed (excludes rate-limited ones). */
std::atomic<uint64_t> m_addr_processed{0};
/** Set of txids to reconsider once their parent transactions have been accepted **/
std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
/** Protects m_getdata_requests **/
Mutex m_getdata_requests_mutex;
/** Work queue of items requested by this peer **/
std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
explicit Peer(NodeId id, bool block_relay_only)
: m_id(id)
, m_tx_relay(std::make_unique<TxRelay>())
, m_block_relay_only{block_relay_only}
{}
};
using PeerRef = std::shared_ptr<Peer>;
class PeerManagerImpl final : public PeerManager
{
public:
PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, BanMan* banman,
ChainstateManager& chainman, CTxMemPool& pool,
CMasternodeMetaMan& mn_metaman, CMasternodeSync& mn_sync,
CGovernanceManager& govman, CSporkManager& sporkman,
const CActiveMasternodeManager* const mn_activeman,
const std::unique_ptr<CDeterministicMNManager>& dmnman,
const std::unique_ptr<CJContext>& cj_ctx,
const std::unique_ptr<LLMQContext>& llmq_ctx,
bool ignore_incoming_txs);
/** Overridden from CValidationInterface. */
void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void BlockChecked(const CBlock& block, const BlockValidationState& state) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override;
/** Implement NetEventsInterface */
void InitializeNode(CNode* pnode) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
/** Implement PeerManager */
void StartScheduledTasks(CScheduler& scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void PushInventory(NodeId nodeid, const CInv& inv) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInv(CInv &inv, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInvFiltered(CInv &inv, const CTransaction &relatedTx, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayInvFiltered(CInv &inv, const uint256 &relatedTxHash, const int minProtoVersion) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void RelayTransaction(const uint256& txid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SetBestHeight(int height) override { m_best_height = height; }
void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message = "") override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex);
bool IsBanned(NodeId pnode) override EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex);
bool IsInvInFilter(NodeId nodeid, const uint256& hash) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
private:
/** Helper to process result of external handlers of message */
void ProcessPeerMsgRet(const PeerMsgRet& ret, CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Get a shared pointer to the Peer object.
* May return an empty shared_ptr if the Peer object can't be found. */
PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Get a shared pointer to the Peer object and remove it from m_peer_map.
* May return an empty shared_ptr if the Peer object can't be found. */
PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially mark a node discouraged based on the contents of a BlockValidationState object
*
* @param[in] via_compact_block this bool is passed in because net_processing should
* punish peers differently depending on whether the data was provided in a compact
* block message or not. If the compact block had a valid header, but contained invalid
* txs, the peer should not be punished. See BIP 152.
*
* @return Returns true if the peer was punished (probably disconnected)
*/
bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
bool via_compact_block, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially ban a node based on the contents of a TxValidationState object
*
* @return Returns true if the peer was punished (probably disconnected)
*
* Changes here may need to be reflected in TxRelayMayResultInDisconnect().
*/
bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Maybe disconnect a peer and discourage future connections from its address.
*
* @param[in] pnode The node to check.
* @param[in] peer The peer object to check.
* @return True if the peer was marked for disconnection in this function
*/
bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);
void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Process a single headers message from a peer. */
void ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
const std::vector<CBlockHeader>& headers,
bool via_compact_block)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Send a version message to a peer */
void PushNodeVersion(CNode& pnode, const Peer& peer);
/** Send a ping message every PING_INTERVAL or if requested via RPC. May
* mark the peer to be disconnected if a ping has timed out.
* We use mockable time for ping timeouts, so setmocktime may cause pings
* to time out. */
void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
/** Send `addr` messages on a regular schedule. */
void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time);
/** Relay (gossip) an address to a few randomly chosen nodes.
*
* @param[in] originator The id of the peer that sent us the address. We don't want to relay it back.
* @param[in] addr Address to relay.
* @param[in] fReachable Whether the address' network is reachable. We relay unreachable
* addresses less.
*/
void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
const CChainParams& m_chainparams;
CConnman& m_connman;
AddrMan& m_addrman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* const m_banman;
ChainstateManager& m_chainman;
CTxMemPool& m_mempool;
const std::unique_ptr<CDeterministicMNManager>& m_dmnman;
const std::unique_ptr<CJContext>& m_cj_ctx;
const std::unique_ptr<LLMQContext>& m_llmq_ctx;
CMasternodeMetaMan& m_mn_metaman;
CMasternodeSync& m_mn_sync;
CGovernanceManager& m_govman;
CSporkManager& m_sporkman;
const CActiveMasternodeManager* const m_mn_activeman;
/** The height of the best chain */
std::atomic<int> m_best_height{-1};
/** Next time to check for stale tip */
std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
/** Whether this node is running in blocks only mode */
const bool m_ignore_incoming_txs;
/** Whether we've completed initial sync yet, for determining when to turn
* on extra block-relay-only peers. */
bool m_initial_sync_finished GUARDED_BY(cs_main){false};
/** Protects m_peer_map. This mutex must not be locked while holding a lock
* on any of the mutexes inside a Peer object. */
mutable Mutex m_peer_mutex;
/**
* Map of all Peer objects, keyed by peer id. This map is protected
* by the m_peer_mutex. Once a shared pointer reference is
* taken, the lock may be released. Individual fields are protected by
* their own locks.
*/
std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* To prevent fingerprinting attacks, only send blocks/headers outside of the
* active chain if they are no more than a month older (both in time, and in
* best equivalent proof of work) than the best header chain we know about and
* we fully-validated them at some point.
*/
bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv, llmq::CInstantSendManager& isman);
/**
* Validation logic for compact filters request handling.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] peer The peer that we received the request from
* @param[in] filter_type The filter type the request is for. Must be basic filters.
* @param[in] start_height The start height for the request
* @param[in] stop_hash The stop_hash for the request
* @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157
* @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced.
* @param[out] filter_index The filter index, if the request can be serviced.
* @return True if the request can be serviced.
*/
bool PrepareBlockFilterRequest(CNode& peer,
BlockFilterType filter_type, uint32_t start_height,
const uint256& stop_hash, uint32_t max_height_diff,
const CBlockIndex*& stop_index,
BlockFilterIndex*& filter_index);
/**
* Handle a cfilters request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFilters(CNode& peer, CDataStream& vRecv);
/**
* Handle a cfheaders request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv);
/**
* Handle a getcfcheckpt request.
*
* May disconnect from the peer in the case of a bad request.
*
* @param[in] peer The peer that we received the request from
* @param[in] vRecv The raw message received
*/
void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv);
/** Checks if address relay is permitted with peer. If needed, initializes
* the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
*
* @return True if address relay is enabled with peer
* False if address relay is disallowed
*/
bool SetupAddressRelay(const CNode& node, Peer& peer);
/** Number of nodes with fSyncStarted. */
int nSyncStarted GUARDED_BY(cs_main) = 0;
/**
* Sources of received blocks, saved to be able to punish them when processing
* happens afterwards.
* Set mapBlockSource[hash].second to false if the node should not be
* punished if the block is invalid.
*/
std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
/** Number of outbound peers with m_chain_sync.m_protect. */
int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
bool AlreadyHave(const CInv& inv)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex);
/**
* Filter for transactions that were recently rejected by
* AcceptToMemoryPool. These are not rerequested until the chain tip
* changes, at which point the entire filter is reset.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
* filter using minimum-sized, 60-byte transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.3MB
*/
CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
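// Sizing, spelled out from the comment above: 1000 tx/s * 120 s window = 120'000 entries.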
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
/*
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
* confirmed.
*
* Blocks don't typically have more than 4000 transactions, so this should
* be at least six blocks (~1 hr) worth of transactions that we can store,
* inserting both a txid and wtxid for every observed transaction.
* If the number of transactions appearing in a block goes up, or if we are
* seeing getdata requests more than an hour after initial announcement, we
* can increase this number.
* The false positive rate of 1/1M should come out to less than 1
* transaction per day that would be inadvertently ignored (which is the
* same probability that we have in the reject filter).
*/
Mutex m_recent_confirmed_transactions_mutex;
CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
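// Sizing, spelled out per the comment above: ~4'000 tx/block * 6 blocks * 2
// identifiers per transaction = 48'000 entries.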
/* Returns a bool indicating whether we requested this block.
* Also used if a block was /not/ received and timed out or started with another peer
*/
bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/* Mark a block as in flight
* Returns false, still setting pit, if the block was already in flight from the same peer
* pit will only be valid as long as the same cs_main lock is being held
*/
bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries.
*/
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
/** When our tip was last updated. */
std::atomic<std::chrono::seconds> m_last_tip_update{0s};
/** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
CTransactionRef FindTxForGetData(const CNode* peer, const uint256& txid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main);
void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex);
void ProcessBlock(CNode& pfrom, const std::shared_ptr<const CBlock>& pblock, bool fForceProcessing);
/** Relay map (txid -> CTransactionRef) */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>> g_relay_expiration GUARDED_BY(cs_main);
/**
* When a peer sends us a valid block, instruct it to announce blocks to us
* using CMPCTBLOCK if possible by adding its nodeid to the end of
* lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
* removing the first element if necessary.
*/
void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
};
} // namespace
namespace {
/** Number of preferable block download peers. */
int nPreferredDownload GUARDED_BY(cs_main) = 0;
struct IteratorComparator
{
template<typename I>
bool operator()(const I& a, const I& b) const
{
return &(*a) < &(*b);
}
};
/** Index from the parents' COutPoint into the mapOrphanTransactions. Used
* to remove orphan transactions from the mapOrphanTransactions */
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
/** Orphan transactions in vector for quick random eviction */
std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);
/** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
* The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
* these are kept in a ring buffer */
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
/** Offset into vExtraTxnForCompact to insert the next tx */
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
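// Insertion into this ring buffer is overwrite-and-advance, roughly (a sketch;
// see the AddToCompactExtraTransactions helper further down in this file):
//   vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetHash(), tx);
//   vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;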
} // namespace
namespace {
/**
* Maintain validation-specific state about nodes, protected by cs_main, instead
* of by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The best known block we know this peer has announced.
const CBlockIndex* pindexBestKnownBlock{nullptr};
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock{};
//! The last full block we both have.
const CBlockIndex* pindexLastCommonBlock{nullptr};
//! The best header we have sent our peer.
const CBlockIndex* pindexBestHeaderSent{nullptr};
//! Length of current-streak of unconnecting headers announcements
int nUnconnectingHeaders{0};
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted{false};
//! When to potentially disconnect peer for stalling headers download
std::chrono::microseconds m_headers_sync_timeout{0us};
//! Since when we're stalling block download progress (in microseconds), or 0.
std::chrono::microseconds m_stalling_since{0us};
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
std::chrono::microseconds m_downloading_since{0us};
int nBlocksInFlight{0};
int nBlocksInFlightValidHeaders{0};
//! Whether we consider this a preferred download peer.
bool fPreferredDownload{false};
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders{false};
//! Whether this peer wants invs or compressed headers (when possible) for block announcements.
bool fPreferHeadersCompressed{false};
/** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
bool m_requested_hb_cmpctblocks{false};
/** Whether this peer will send us cmpctblocks if we request them. */
bool m_provides_cmpctblocks{false};
/** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic.
*
* Both are only in effect for outbound, non-manual, non-protected connections.
* Any peer protected (m_protect = true) is not chosen for eviction. A peer is
* marked as protected if all of these are true:
* - its connection type is IsBlockOnlyConn() == false
* - it gave us a valid connecting header
* - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
* - its chain tip has at least as much work as ours
*
* CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
* set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
* - If at timeout their best known block now has more work than our tip
* when the timeout was set, then either reset the timeout or clear it
* (after comparing against our current tip's work)
* - If at timeout their best known block still has less work than our
* tip did when the timeout was set, then send a getheaders message,
* and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in the future.
* If their best known block is still behind when that new timeout is
* reached, disconnect.
*
* EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers,
* drop the outbound one that least recently announced us a new block.
*/
struct ChainSyncTimeoutState {
//! A timeout used for checking whether our peer has sufficiently synced
int64_t m_timeout{0};
//! A header with the work we require on our peer's chain
const CBlockIndex* m_work_header{nullptr};
//! After timeout is reached, set to true after sending getheaders
bool m_sent_getheaders{false};
//! Whether this peer is protected from disconnection due to a bad/slow chain
bool m_protect{false};
};
ChainSyncTimeoutState m_chain_sync;
//! Time of last new block announcement
int64_t m_last_block_announcement{0};
/*
* State associated with objects download.
*
* Tx download algorithm:
*
* When inv comes in, queue up (process_time, inv) inside the peer's
* CNodeState (m_object_process_time) as long as m_object_announced for the peer
* isn't too big (MAX_PEER_OBJECT_ANNOUNCEMENTS).
*
* The process_time for an object is set to nNow for outbound peers,
* nNow + 2 seconds for inbound peers. This is the time at which we'll
* consider trying to request the object from the peer in
* SendMessages(). The delay for inbound peers is to allow outbound peers
* a chance to announce before we request from inbound peers, to prevent
* an adversary from using inbound connections to blind us to an
* object (InvBlock).
*
* When we call SendMessages() for a given peer,
* we will loop over the objects in m_object_process_time, looking
* at the objects whose process_time <= nNow. We'll request each
* such object that we don't have already and that hasn't been
* requested from another peer recently, up until we hit the
* MAX_PEER_OBJECT_IN_FLIGHT limit for the peer. Then we'll update
* g_already_asked_for for each requested inv, storing the time of the
* GETDATA request. We use g_already_asked_for to coordinate objects
* requests amongst our peers.
*
* For objects that we still need but we have already recently
* requested from some other peer, we'll reinsert (process_time, inv)
* back into the peer's m_object_process_time at the point in the future at
* which the most recent GETDATA request would time out (i.e.
* GetObjectInterval + the request time stored in g_already_asked_for).
* We add an additional delay for inbound peers, again to prefer
* attempting download from outbound peers first.
* We also add an extra small random delay up to 2 seconds
* to avoid biasing some peers over others. (e.g., due to fixed ordering
* of peer processing in ThreadMessageHandler).
*
* When we receive an object from a peer, we remove the inv from the
* peer's m_object_in_flight set and from their recently announced set
* (m_object_announced). We also clear g_already_asked_for for that entry, so
* that if somehow the object is not accepted but also not added to
* the reject filter, then we will eventually redownload from other
* peers.
*/
struct ObjectDownloadState {
/* Track when to attempt download of announced objects (process
* time in micros -> inv)
*/
std::multimap<std::chrono::microseconds, CInv> m_object_process_time;
//! Store all the objects a peer has recently announced
std::set<CInv> m_object_announced;
//! Store objects which were requested by us, with timestamp
std::map<CInv, std::chrono::microseconds> m_object_in_flight;
//! Periodically check for stuck getdata requests
std::chrono::microseconds m_check_expiry_timer{0};
};
ObjectDownloadState m_object_download;
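// A concrete timeline of the scheme described above, using constants from the
// top of this file: an inbound peer announcing object T at time t gets
// process_time = t + INBOUND_PEER_TX_DELAY (2s) plus a random delay of up to
// MAX_GETDATA_RANDOM_DELAY (2s). If T is already in flight from another peer,
// the entry is pushed back to about g_already_asked_for[T] + the getdata
// interval (60s) before we consider requesting it from this peer.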
//! Whether this peer is an inbound connection
const bool m_is_inbound;
//! A rolling bloom filter of all announced tx CInvs to this peer.
CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
// Keeps track of the time (in microseconds) when transactions were last requested
unordered_limitedmap<uint256, std::chrono::microseconds, StaticSaltedHasher> g_already_asked_for(MAX_INV_SZ, MAX_INV_SZ * 2);
unordered_limitedmap<uint256, std::chrono::microseconds, StaticSaltedHasher> g_erased_object_requests(MAX_INV_SZ, MAX_INV_SZ * 2);
/** Map maintaining per-node state. */
static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
if (it == mapNodeState.end())
return nullptr;
return &it->second;
}
/**
* Whether the peer supports the address. For example, a peer that does not
* implement BIP155 cannot receive Tor v3 addresses because they require
* ADDRv2 (BIP155) encoding.
*/
static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
{
return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}
static void AddAddressKnown(Peer& peer, const CAddress& addr)
{
assert(peer.m_addr_known);
peer.m_addr_known->insert(addr.GetKey());
}
static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
{
// Known checking here is only to save space from duplicates.
// Before sending, we'll filter it again for known addresses that were
// added after addresses were pushed.
assert(peer.m_addr_known);
if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) {
if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr;
} else {
peer.m_addrs_to_send.push_back(addr);
}
}
}
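// Design note: once m_addrs_to_send is full, PushAddress overwrites a uniformly
// random existing slot rather than dropping the new address, so no announcement
// is privileged by arrival order.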
static void AddKnownInv(Peer& peer, const uint256& hash)
{
// Dash always initializes m_tx_relay
assert(peer.m_tx_relay != nullptr);
LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
peer.m_tx_relay->m_tx_inventory_known_filter.insert(hash);
}
static void PushInv(Peer& peer, const CInv& inv)
{
// Dash always initializes m_tx_relay
assert(peer.m_tx_relay != nullptr);
ASSERT_IF_DEBUG(inv.type != MSG_BLOCK);
if (inv.type == MSG_BLOCK) {
LogPrintf("%s -- WARNING: using PushInv for BLOCK inv, peer=%d\n", __func__, peer.m_id);
return;
}
LOCK(peer.m_tx_relay->m_tx_inventory_mutex);
if (peer.m_tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) {
LogPrint(BCLog::NET, "%s -- skipping known inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id);
return;
}
LogPrint(BCLog::NET, "%s -- adding new inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id);
if (inv.type == MSG_TX || inv.type == MSG_DSTX) {
peer.m_tx_relay->m_tx_inventory_to_send.insert(inv.hash);
return;
}
peer.m_tx_relay->vInventoryOtherToSend.push_back(inv);
}
static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(NetPermissionFlags::NoBan)) && !node.IsAddrFetchConn() && !node.fClient;
nPreferredDownload += state->fPreferredDownload;
}
bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash)
{
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
assert(state != nullptr);