diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000000000..99262ca8942e60
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+.dockerignore
+.git/
+.github/
+.gitignore
+.idea/
+README.md
+Dockerfile
+f
+target/
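
The new ignore file above keeps VCS metadata, CI config, and build artifacts out of the Docker build context. As a hedged sanity check (the image tag is illustrative, and the `Dockerfile` listed above is assumed to sit at the repo root):

```bash
# Build from the repo root; .dockerignore keeps .git/, target/, etc.
# out of the context sent to the Docker daemon.
docker build -t jito-solana:local .
```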
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 95e3fb34445ee0..91cf374c791c70 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -3,14 +3,15 @@
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
-version: 2
-updates:
-- package-ecosystem: cargo
- directory: "/"
- schedule:
- interval: daily
- time: "01:00"
- timezone: America/Los_Angeles
- #labels:
- # - "automerge"
- open-pull-requests-limit: 6
+# NOTE: Jito-Solana ignores this as we pull in upstream dependabot merges
+#version: 2
+#updates:
+#- package-ecosystem: cargo
+# directory: "/"
+# schedule:
+# interval: daily
+# time: "01:00"
+# timezone: America/Los_Angeles
+# #labels:
+# # - "automerge"
+# open-pull-requests-limit: 6
diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml
index d0bad722e0d3a6..d9aebe1830e831 100644
--- a/.github/workflows/cargo.yml
+++ b/.github/workflows/cargo.yml
@@ -34,6 +34,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
- uses: mozilla-actions/sccache-action@v0.0.3
with:
@@ -56,6 +58,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
- uses: mozilla-actions/sccache-action@v0.0.3
with:
diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml
index 97118918ef8442..aacb52629d2a5a 100644
--- a/.github/workflows/client-targets.yml
+++ b/.github/workflows/client-targets.yml
@@ -32,6 +32,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- run: cargo install cargo-ndk@2.12.2
@@ -56,6 +58,8 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- name: Setup Rust
run: |
diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml
index a47e7cde5fb217..9b57d633ade55f 100644
--- a/.github/workflows/crate-check.yml
+++ b/.github/workflows/crate-check.yml
@@ -18,6 +18,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
+ submodules: 'recursive'
- name: Get commit range (push)
if: ${{ github.event_name == 'push' }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index fb2096bd33b185..e5ac907ea1f82f 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -22,6 +22,7 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
+ submodules: 'recursive'
- name: Get commit range (push)
if: ${{ github.event_name == 'push' }}
@@ -77,6 +78,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- name: Setup Node
uses: actions/setup-node@v3
diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml
index 6afd398f43accb..2768e253f10dfc 100644
--- a/.github/workflows/downstream-project-spl.yml
+++ b/.github/workflows/downstream-project-spl.yml
@@ -37,6 +37,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
@@ -86,6 +88,8 @@ jobs:
]
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
@@ -137,6 +141,8 @@ jobs:
steps:
- uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
- shell: bash
run: |
diff --git a/.github/workflows/increment-cargo-version-on-release.yml b/.github/workflows/increment-cargo-version-on-release.yml
index 5592d76ca52dd1..ca55af215510cd 100644
--- a/.github/workflows/increment-cargo-version-on-release.yml
+++ b/.github/workflows/increment-cargo-version-on-release.yml
@@ -11,6 +11,8 @@ jobs:
steps:
- name: Checkout Repository
uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
# This script confirms two assumptions:
# 1) Tag should be branch.
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 98dc697920262c..fc98a5895b2ee7 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -26,6 +26,7 @@ jobs:
with:
ref: master
fetch-depth: 0
+ submodules: 'recursive'
- name: Setup Rust
shell: bash
diff --git a/.gitignore b/.gitignore
index 3167a9d7207b21..f891833b1556d3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
/solana-release.tar.bz2
/solana-metrics/
/solana-metrics.tar.bz2
-/target/
+**/target/
/test-ledger/
**/*.rs.bk
@@ -27,7 +27,11 @@ log-*/
# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
+/jito_*.so
.DS_Store
# scripts that may be generated by cargo *-bpf commands
**/cargo-*-bpf-child-script-*.sh
+
+.env
+docker-output/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000000000..e31fc7fccd923c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,9 @@
+[submodule "anchor"]
+ path = anchor
+ url = https://github.com/jito-foundation/anchor.git
+[submodule "jito-programs"]
+ path = jito-programs
+ url = https://github.com/jito-foundation/jito-programs.git
+[submodule "jito-protos/protos"]
+ path = jito-protos/protos
+ url = https://github.com/jito-labs/mev-protos.git
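
With these submodules in place (and the workflow changes above checking them out via `submodules: 'recursive'`), a local checkout needs the same treatment. A minimal sketch for a fresh clone of the fork:

```bash
# Clone with all three submodules (anchor, jito-programs, jito-protos/protos)
git clone --recurse-submodules https://github.com/jito-foundation/jito-solana.git
cd jito-solana

# For an existing checkout that predates the submodules:
git submodule update --init --recursive
```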
diff --git a/Cargo.lock b/Cargo.lock
index 3d4df12d8d2886..52ce32b82dfcf7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -126,6 +126,145 @@ dependencies = [
"alloc-no-stdlib",
]
+[[package]]
+name = "anchor-attribute-access-control"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "regex",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-account"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "bs58 0.4.0",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "rustversion",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-constant"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.69",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-error"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-event"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-interface"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "heck 0.3.3",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-program"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-attribute-state"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-derive-accounts"
+version = "0.24.2"
+dependencies = [
+ "anchor-syn",
+ "anyhow",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "anchor-lang"
+version = "0.24.2"
+dependencies = [
+ "anchor-attribute-access-control",
+ "anchor-attribute-account",
+ "anchor-attribute-constant",
+ "anchor-attribute-error",
+ "anchor-attribute-event",
+ "anchor-attribute-interface",
+ "anchor-attribute-program",
+ "anchor-attribute-state",
+ "anchor-derive-accounts",
+ "arrayref",
+ "base64 0.13.1",
+ "bincode",
+ "borsh 0.10.3",
+ "bytemuck",
+ "solana-program",
+ "thiserror",
+]
+
+[[package]]
+name = "anchor-syn"
+version = "0.24.2"
+dependencies = [
+ "anyhow",
+ "bs58 0.3.1",
+ "heck 0.3.3",
+ "proc-macro2 1.0.69",
+ "proc-macro2-diagnostics",
+ "quote 1.0.33",
+ "serde",
+ "serde_json",
+ "sha2 0.9.9",
+ "syn 1.0.109",
+ "thiserror",
+]
+
[[package]]
name = "android-tzdata"
version = "0.1.1"
@@ -156,12 +295,55 @@ dependencies = [
"winapi 0.3.9",
]
+[[package]]
+name = "anstream"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is-terminal",
+ "utf8parse",
+]
+
[[package]]
name = "anstyle"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
+[[package]]
+name = "anstyle-parse"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "anyhow"
version = "1.0.75"
@@ -228,7 +410,7 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348"
dependencies = [
- "quote",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -240,8 +422,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565"
dependencies = [
"num-bigint 0.4.4",
"num-traits",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -276,8 +458,8 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -337,8 +519,8 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
"synstructure",
]
@@ -349,8 +531,8 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -424,8 +606,8 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -435,8 +617,8 @@ version = "0.1.74"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -586,8 +768,8 @@ dependencies = [
"lazycell",
"peeking_take_while",
"prettyplease 0.2.4",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"regex",
"rustc-hash",
"shlex",
@@ -722,7 +904,7 @@ dependencies = [
"borsh-derive-internal 0.9.3",
"borsh-schema-derive-internal 0.9.3",
"proc-macro-crate 0.1.5",
- "proc-macro2",
+ "proc-macro2 1.0.69",
"syn 1.0.109",
]
@@ -735,7 +917,7 @@ dependencies = [
"borsh-derive-internal 0.10.3",
"borsh-schema-derive-internal 0.10.3",
"proc-macro-crate 0.1.5",
- "proc-macro2",
+ "proc-macro2 1.0.69",
"syn 1.0.109",
]
@@ -745,8 +927,8 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -756,8 +938,8 @@ version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -767,8 +949,8 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -778,8 +960,8 @@ version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -804,6 +986,12 @@ dependencies = [
"alloc-stdlib",
]
+[[package]]
+name = "bs58"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb"
+
[[package]]
name = "bs58"
version = "0.4.0"
@@ -884,8 +1072,8 @@ version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -1110,7 +1298,7 @@ checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
dependencies = [
"atty",
"bitflags 1.3.2",
- "clap_derive",
+ "clap_derive 3.2.18",
"clap_lex 0.2.4",
"indexmap 1.9.3",
"once_cell",
@@ -1126,6 +1314,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
dependencies = [
"clap_builder",
+ "clap_derive 4.3.12",
+ "once_cell",
]
[[package]]
@@ -1134,8 +1324,10 @@ version = "4.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
dependencies = [
+ "anstream",
"anstyle",
"clap_lex 0.5.0",
+ "strsim 0.10.0",
]
[[package]]
@@ -1144,13 +1336,25 @@ version = "3.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
dependencies = [
- "heck",
+ "heck 0.4.0",
"proc-macro-error",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
+[[package]]
+name = "clap_derive"
+version = "4.3.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
+dependencies = [
+ "heck 0.4.0",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 2.0.38",
+]
+
[[package]]
name = "clap_lex"
version = "0.2.4"
@@ -1166,6 +1370,12 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
+[[package]]
+name = "colorchoice"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
+
[[package]]
name = "combine"
version = "3.8.1"
@@ -1242,9 +1452,9 @@ version = "0.2.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500"
dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "unicode-xid 0.2.2",
]
[[package]]
@@ -1494,8 +1704,8 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb"
dependencies = [
"fnv",
"ident_case",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"strsim 0.10.0",
"syn 2.0.38",
]
@@ -1507,7 +1717,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a"
dependencies = [
"darling_core",
- "quote",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -1541,6 +1751,17 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+[[package]]
+name = "default-env"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f753eb82d29277e79efc625e84aecacfd4851ee50e05a8573a4740239a77bfd3"
+dependencies = [
+ "proc-macro2 0.4.30",
+ "quote 0.6.13",
+ "syn 0.15.44",
+]
+
[[package]]
name = "der"
version = "0.5.1"
@@ -1576,8 +1797,8 @@ version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -1588,8 +1809,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df"
dependencies = [
"convert_case",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"rustc_version 0.3.3",
"syn 1.0.109",
]
@@ -1677,8 +1898,8 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -1700,8 +1921,8 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -1759,8 +1980,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d"
dependencies = [
"enum-ordinalize",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -1800,8 +2021,8 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -1813,8 +2034,8 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef"
dependencies = [
"num-bigint 0.4.4",
"num-traits",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -2075,8 +2296,8 @@ version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -2353,6 +2574,15 @@ dependencies = [
"http",
]
+[[package]]
+name = "heck"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
+dependencies = [
+ "unicode-segmentation",
+]
+
[[package]]
name = "heck"
version = "0.4.0"
@@ -2692,6 +2922,49 @@ version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+[[package]]
+name = "jito-programs-vote-state"
+version = "0.1.5"
+dependencies = [
+ "anchor-lang",
+ "bincode",
+ "serde",
+ "serde_derive",
+ "solana-program",
+]
+
+[[package]]
+name = "jito-protos"
+version = "1.17.34"
+dependencies = [
+ "bytes",
+ "prost",
+ "prost-types",
+ "protobuf-src",
+ "tonic",
+ "tonic-build",
+]
+
+[[package]]
+name = "jito-tip-distribution"
+version = "0.1.5"
+dependencies = [
+ "anchor-lang",
+ "default-env",
+ "jito-programs-vote-state",
+ "solana-program",
+ "solana-security-txt",
+]
+
+[[package]]
+name = "jito-tip-payment"
+version = "0.1.5"
+dependencies = [
+ "anchor-lang",
+ "default-env",
+ "solana-security-txt",
+]
+
[[package]]
name = "jobserver"
version = "0.1.24"
@@ -2772,8 +3045,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2"
dependencies = [
"proc-macro-crate 0.1.5",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3141,9 +3414,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.8"
+version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
@@ -3166,8 +3439,8 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3299,8 +3572,8 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3310,8 +3583,8 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -3402,8 +3675,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
dependencies = [
"proc-macro-crate 1.1.0",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3414,8 +3687,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6"
dependencies = [
"proc-macro-crate 1.1.0",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -3426,8 +3699,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e"
dependencies = [
"proc-macro-crate 1.1.0",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -3509,8 +3782,8 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3585,8 +3858,8 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7"
dependencies = [
"Inflector",
"proc-macro-error",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3739,8 +4012,8 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55"
dependencies = [
"pest",
"pest_meta",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3790,8 +4063,8 @@ version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -3924,7 +4197,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18"
dependencies = [
- "proc-macro2",
+ "proc-macro2 1.0.69",
"syn 1.0.109",
]
@@ -3934,7 +4207,7 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058"
dependencies = [
- "proc-macro2",
+ "proc-macro2 1.0.69",
"syn 2.0.38",
]
@@ -3964,8 +4237,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
"version_check",
]
@@ -3976,11 +4249,20 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"version_check",
]
+[[package]]
+name = "proc-macro2"
+version = "0.4.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759"
+dependencies = [
+ "unicode-xid 0.1.0",
+]
+
[[package]]
name = "proc-macro2"
version = "1.0.69"
@@ -3990,6 +4272,19 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "proc-macro2-diagnostics"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada"
+dependencies = [
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+ "version_check",
+ "yansi",
+]
+
[[package]]
name = "proptest"
version = "1.3.1"
@@ -4027,7 +4322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
dependencies = [
"bytes",
- "heck",
+ "heck 0.4.0",
"itertools",
"lazy_static",
"log",
@@ -4050,8 +4345,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
dependencies = [
"anyhow",
"itertools",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -4096,8 +4391,8 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -4155,13 +4450,22 @@ dependencies = [
"windows-sys 0.48.0",
]
+[[package]]
+name = "quote"
+version = "0.6.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1"
+dependencies = [
+ "proc-macro2 0.4.30",
+]
+
[[package]]
name = "quote"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
- "proc-macro2",
+ "proc-macro2 1.0.69",
]
[[package]]
@@ -4576,7 +4880,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8"
dependencies = [
"log",
"ring",
- "rustls-webpki",
+ "rustls-webpki 0.101.4",
"sct",
]
@@ -4610,6 +4914,16 @@ dependencies = [
"base64 0.13.1",
]
+[[package]]
+name = "rustls-webpki"
+version = "0.100.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3"
+dependencies = [
+ "ring",
+ "untrusted",
+]
+
[[package]]
name = "rustls-webpki"
version = "0.101.4"
@@ -4684,8 +4998,8 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -4773,8 +5087,8 @@ version = "1.0.189"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -4818,8 +5132,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f"
dependencies = [
"darling",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -4868,8 +5182,8 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -5105,7 +5419,7 @@ dependencies = [
"assert_matches",
"base64 0.21.4",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"bv",
"lazy_static",
"serde",
@@ -5320,6 +5634,7 @@ dependencies = [
"solana-accounts-db",
"solana-banks-interface",
"solana-client",
+ "solana-gossip",
"solana-runtime",
"solana-sdk",
"solana-send-transaction-service",
@@ -5448,6 +5763,27 @@ dependencies = [
"tempfile",
]
+[[package]]
+name = "solana-bundle"
+version = "1.17.34"
+dependencies = [
+ "anchor-lang",
+ "assert_matches",
+ "itertools",
+ "log",
+ "serde",
+ "solana-accounts-db",
+ "solana-ledger",
+ "solana-logger",
+ "solana-measure",
+ "solana-poh",
+ "solana-program-runtime",
+ "solana-runtime",
+ "solana-sdk",
+ "solana-transaction-status",
+ "thiserror",
+]
+
[[package]]
name = "solana-cargo-build-bpf"
version = "1.17.34"
@@ -5533,7 +5869,7 @@ version = "1.17.34"
dependencies = [
"assert_matches",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"clap 2.33.3",
"console",
"const_format",
@@ -5732,10 +6068,11 @@ dependencies = [
name = "solana-core"
version = "1.17.34"
dependencies = [
+ "anchor-lang",
"assert_matches",
"base64 0.21.4",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"bytes",
"chrono",
"crossbeam-channel",
@@ -5746,11 +6083,16 @@ dependencies = [
"futures 0.3.28",
"histogram",
"itertools",
+ "jito-protos",
+ "jito-tip-distribution",
+ "jito-tip-payment",
"lazy_static",
"log",
"lru",
"min-max-heap",
"num_enum 0.6.1",
+ "prost",
+ "prost-types",
"quinn",
"rand 0.8.5",
"rand_chacha 0.3.1",
@@ -5767,6 +6109,7 @@ dependencies = [
"serial_test",
"solana-accounts-db",
"solana-bloom",
+ "solana-bundle",
"solana-client",
"solana-core",
"solana-cost-model",
@@ -5783,11 +6126,13 @@ dependencies = [
"solana-perf",
"solana-poh",
"solana-program-runtime",
+ "solana-program-test",
"solana-quic-client",
"solana-rayon-threadlimit",
"solana-rpc",
"solana-rpc-client-api",
"solana-runtime",
+ "solana-runtime-plugin",
"solana-sdk",
"solana-send-transaction-service",
"solana-stake-program",
@@ -5808,6 +6153,8 @@ dependencies = [
"test-case",
"thiserror",
"tokio",
+ "tonic",
+ "tonic-build",
"trees",
]
@@ -5941,7 +6288,7 @@ dependencies = [
"bitflags 2.3.3",
"blake3",
"block-buffer 0.10.4",
- "bs58",
+ "bs58 0.4.0",
"bv",
"byteorder",
"cc",
@@ -5967,8 +6314,8 @@ dependencies = [
name = "solana-frozen-abi-macro"
version = "1.17.34"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"rustc_version 0.4.0",
"syn 2.0.38",
]
@@ -6023,7 +6370,7 @@ dependencies = [
name = "solana-geyser-plugin-manager"
version = "1.17.34"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"json5",
"jsonrpc-core",
@@ -6134,7 +6481,7 @@ dependencies = [
name = "solana-keygen"
version = "1.17.34"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"clap 3.2.23",
"dirs-next",
"num_cpus",
@@ -6154,7 +6501,7 @@ dependencies = [
"assert_matches",
"bincode",
"bitflags 2.3.3",
- "bs58",
+ "bs58 0.4.0",
"byteorder",
"chrono",
"chrono-humanize",
@@ -6220,7 +6567,7 @@ name = "solana-ledger-tool"
version = "1.17.34"
dependencies = [
"assert_cmd",
- "bs58",
+ "bs58 0.4.0",
"bytecount",
"chrono",
"clap 2.33.3",
@@ -6511,7 +6858,7 @@ dependencies = [
"blake3",
"borsh 0.10.3",
"borsh 0.9.3",
- "bs58",
+ "bs58 0.4.0",
"bv",
"bytemuck",
"cc",
@@ -6694,7 +7041,7 @@ version = "1.17.34"
dependencies = [
"base64 0.21.4",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"dashmap 4.0.2",
"itertools",
@@ -6714,6 +7061,7 @@ dependencies = [
"soketto",
"solana-account-decoder",
"solana-accounts-db",
+ "solana-bundle",
"solana-client",
"solana-entry",
"solana-faucet",
@@ -6724,6 +7072,7 @@ dependencies = [
"solana-net-utils",
"solana-perf",
"solana-poh",
+ "solana-program-runtime",
"solana-rayon-threadlimit",
"solana-rpc-client-api",
"solana-runtime",
@@ -6755,7 +7104,7 @@ dependencies = [
"async-trait",
"base64 0.21.4",
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"futures 0.3.28",
"indicatif",
@@ -6781,7 +7130,7 @@ name = "solana-rpc-client-api"
version = "1.17.34"
dependencies = [
"base64 0.21.4",
- "bs58",
+ "bs58 0.4.0",
"jsonrpc-core",
"reqwest",
"semver 1.0.20",
@@ -6789,6 +7138,8 @@ dependencies = [
"serde_derive",
"serde_json",
"solana-account-decoder",
+ "solana-accounts-db",
+ "solana-bundle",
"solana-sdk",
"solana-transaction-status",
"solana-version",
@@ -6818,13 +7169,14 @@ name = "solana-rpc-test"
version = "1.17.34"
dependencies = [
"bincode",
- "bs58",
+ "bs58 0.4.0",
"crossbeam-channel",
"futures-util",
"log",
"reqwest",
"serde",
"serde_json",
+ "serial_test",
"solana-account-decoder",
"solana-client",
"solana-logger",
@@ -6923,17 +7275,36 @@ dependencies = [
"zstd",
]
+[[package]]
+name = "solana-runtime-plugin"
+version = "1.17.34"
+dependencies = [
+ "crossbeam-channel",
+ "json5",
+ "jsonrpc-core",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "jsonrpc-ipc-server",
+ "jsonrpc-server-utils",
+ "libloading",
+ "log",
+ "solana-runtime",
+ "solana-sdk",
+ "thiserror",
+]
+
[[package]]
name = "solana-sdk"
version = "1.17.34"
dependencies = [
+ "anchor-lang",
"anyhow",
"assert_matches",
"base64 0.21.4",
"bincode",
"bitflags 2.3.3",
"borsh 0.10.3",
- "bs58",
+ "bs58 0.4.0",
"bytemuck",
"byteorder",
"chrono",
@@ -6985,9 +7356,9 @@ dependencies = [
name = "solana-sdk-macro"
version = "1.17.34"
dependencies = [
- "bs58",
- "proc-macro2",
- "quote",
+ "bs58 0.4.0",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"rustversion",
"syn 2.0.38",
]
@@ -7005,11 +7376,13 @@ dependencies = [
"crossbeam-channel",
"log",
"solana-client",
+ "solana-gossip",
"solana-logger",
"solana-measure",
"solana-metrics",
"solana-runtime",
"solana-sdk",
+ "solana-streamer",
"solana-tpu-client",
]
@@ -7083,7 +7456,7 @@ name = "solana-storage-proto"
version = "1.17.34"
dependencies = [
"bincode",
- "bs58",
+ "bs58 0.4.0",
"enum-iterator",
"prost",
"protobuf-src",
@@ -7197,6 +7570,44 @@ dependencies = [
"solana-sdk",
]
+[[package]]
+name = "solana-tip-distributor"
+version = "1.17.34"
+dependencies = [
+ "anchor-lang",
+ "clap 4.3.21",
+ "crossbeam-channel",
+ "env_logger",
+ "futures 0.3.28",
+ "gethostname",
+ "im",
+ "itertools",
+ "jito-tip-distribution",
+ "jito-tip-payment",
+ "log",
+ "num-traits",
+ "rand 0.8.5",
+ "serde",
+ "serde_json",
+ "solana-accounts-db",
+ "solana-client",
+ "solana-genesis-utils",
+ "solana-ledger",
+ "solana-measure",
+ "solana-merkle-tree",
+ "solana-metrics",
+ "solana-program",
+ "solana-program-runtime",
+ "solana-rpc-client-api",
+ "solana-runtime",
+ "solana-sdk",
+ "solana-stake-program",
+ "solana-transaction-status",
+ "solana-vote",
+ "thiserror",
+ "tokio",
+]
+
[[package]]
name = "solana-tokens"
version = "1.17.34"
@@ -7287,7 +7698,7 @@ dependencies = [
"base64 0.21.4",
"bincode",
"borsh 0.10.3",
- "bs58",
+ "bs58 0.4.0",
"lazy_static",
"log",
"serde",
@@ -7410,6 +7821,7 @@ dependencies = [
"solana-rpc-client",
"solana-rpc-client-api",
"solana-runtime",
+ "solana-runtime-plugin",
"solana-sdk",
"solana-send-transaction-service",
"solana-storage-bigtable",
@@ -7422,6 +7834,7 @@ dependencies = [
"symlink",
"thiserror",
"tikv-jemallocator",
+ "tonic",
]
[[package]]
@@ -7503,7 +7916,7 @@ dependencies = [
name = "solana-zk-keygen"
version = "1.17.34"
dependencies = [
- "bs58",
+ "bs58 0.4.0",
"clap 3.2.23",
"dirs-next",
"num_cpus",
@@ -7647,7 +8060,7 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b"
dependencies = [
- "quote",
+ "quote 1.0.33",
"spl-discriminator-syn",
"syn 2.0.38",
]
@@ -7658,8 +8071,8 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e5f2044ca42c8938d54d1255ce599c79a1ffd86b677dfab695caa20f9ffc3f2"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"sha2 0.10.8",
"syn 2.0.38",
"thiserror",
@@ -7716,8 +8129,8 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab5269c8e868da17b6552ef35a51355a017bd8e0eae269c201fef830d35fa52c"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"sha2 0.10.8",
"syn 2.0.38",
]
@@ -7875,9 +8288,9 @@ version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
dependencies = [
- "heck",
- "proc-macro2",
- "quote",
+ "heck 0.4.0",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"rustversion",
"syn 1.0.109",
]
@@ -7894,14 +8307,25 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7973cce6668464ea31f176d85b13c7ab3bba2cb3b77a2ed26abd7801688010a"
+[[package]]
+name = "syn"
+version = "0.15.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5"
+dependencies = [
+ "proc-macro2 0.4.30",
+ "quote 0.6.13",
+ "unicode-xid 0.1.0",
+]
+
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"unicode-ident",
]
@@ -7911,8 +8335,8 @@ version = "2.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"unicode-ident",
]
@@ -7928,10 +8352,10 @@ version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
- "unicode-xid",
+ "unicode-xid 0.2.2",
]
[[package]]
@@ -8033,8 +8457,8 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -8083,8 +8507,8 @@ checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462"
dependencies = [
"cfg-if 1.0.0",
"proc-macro-error",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -8095,8 +8519,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37cfd7bbc88a0104e304229fba519bdc45501a30b760fb72240342f1289ad257"
dependencies = [
"proc-macro-error",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
"test-case-core",
]
@@ -8131,8 +8555,8 @@ version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -8268,8 +8692,8 @@ name = "tokio-macros"
version = "2.1.0"
source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7bf0e43e4131a5377b3291fce21#7cf47705faacf7bf0e43e4131a5377b3291fce21"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -8395,6 +8819,7 @@ dependencies = [
"percent-encoding 2.3.0",
"pin-project",
"prost",
+ "rustls-native-certs",
"rustls-pemfile 1.0.0",
"tokio",
"tokio-rustls",
@@ -8403,6 +8828,7 @@ dependencies = [
"tower-layer",
"tower-service",
"tracing",
+ "webpki-roots 0.23.1",
]
[[package]]
@@ -8412,9 +8838,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07"
dependencies = [
"prettyplease 0.1.9",
- "proc-macro2",
+ "proc-macro2 1.0.69",
"prost-build",
- "quote",
+ "quote 1.0.33",
"syn 1.0.109",
]
@@ -8468,8 +8894,8 @@ version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -8587,12 +9013,24 @@ dependencies = [
"tinyvec",
]
+[[package]]
+name = "unicode-segmentation"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
+
[[package]]
name = "unicode-width"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+[[package]]
+name = "unicode-xid"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
+
[[package]]
name = "unicode-xid"
version = "0.2.2"
@@ -8674,6 +9112,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b"
+[[package]]
+name = "utf8parse"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
+
[[package]]
name = "valuable"
version = "0.1.0"
@@ -8765,8 +9209,8 @@ dependencies = [
"bumpalo",
"log",
"once_cell",
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
"wasm-bindgen-shared",
]
@@ -8789,7 +9233,7 @@ version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
dependencies = [
- "quote",
+ "quote 1.0.33",
"wasm-bindgen-macro-support",
]
@@ -8799,8 +9243,8 @@ version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
@@ -8822,13 +9266,22 @@ dependencies = [
"wasm-bindgen",
]
+[[package]]
+name = "webpki-roots"
+version = "0.23.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338"
+dependencies = [
+ "rustls-webpki 0.100.3",
+]
+
[[package]]
name = "webpki-roots"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888"
dependencies = [
- "rustls-webpki",
+ "rustls-webpki 0.101.4",
]
[[package]]
@@ -8909,6 +9362,15 @@ dependencies = [
"windows-targets 0.48.0",
]
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.0",
+]
+
[[package]]
name = "windows-targets"
version = "0.42.2"
@@ -8939,6 +9401,21 @@ dependencies = [
"windows_x86_64_msvc 0.48.0",
]
+[[package]]
+name = "windows-targets"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.0",
+ "windows_aarch64_msvc 0.52.0",
+ "windows_i686_gnu 0.52.0",
+ "windows_i686_msvc 0.52.0",
+ "windows_x86_64_gnu 0.52.0",
+ "windows_x86_64_gnullvm 0.52.0",
+ "windows_x86_64_msvc 0.52.0",
+]
+
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@@ -8951,6 +9428,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@@ -8963,6 +9446,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@@ -8975,6 +9464,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@@ -8987,6 +9482,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@@ -8999,6 +9500,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@@ -9011,6 +9518,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@@ -9023,6 +9536,12 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+
[[package]]
name = "winreg"
version = "0.50.0"
@@ -9069,6 +9588,12 @@ dependencies = [
"linked-hash-map",
]
+[[package]]
+name = "yansi"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
+
[[package]]
name = "yasna"
version = "0.5.0"
@@ -9093,8 +9618,8 @@ version = "0.7.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
@@ -9113,8 +9638,8 @@ version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
- "proc-macro2",
- "quote",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
"syn 2.0.38",
]
diff --git a/Cargo.toml b/Cargo.toml
index f6814187a717a4..3ddc38099c73b4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -17,6 +17,7 @@ members = [
"bench-tps",
"bloom",
"bucket_map",
+ "bundle",
"clap-utils",
"clap-v3-utils",
"cli",
@@ -39,6 +40,7 @@ members = [
"geyser-plugin-manager",
"gossip",
"install",
+ "jito-protos",
"keygen",
"ledger",
"ledger-tool",
@@ -83,6 +85,7 @@ members = [
"rpc-client-nonce-utils",
"rpc-test",
"runtime",
+ "runtime-plugin",
"runtime/store-tool",
"sdk",
"sdk/cargo-build-bpf",
@@ -100,6 +103,7 @@ members = [
"streamer",
"test-validator",
"thin-client",
+ "tip-distributor",
"tokens",
"tpu-client",
"transaction-dos",
@@ -116,6 +120,8 @@ members = [
]
exclude = [
+ "anchor",
+ "jito-programs",
"programs/sbf",
]
@@ -134,6 +140,7 @@ edition = "2021"
aes-gcm-siv = "0.10.3"
ahash = "=0.8.5"
anyhow = "1.0.75"
+anchor-lang = { path = "anchor/lang" }
ark-bn254 = "0.4.0"
ark-ec = "0.4.0"
ark-ff = "0.4.0"
@@ -221,6 +228,9 @@ Inflector = "0.11.4"
itertools = "0.10.5"
jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] }
js-sys = "0.3.64"
+jito-protos = { path = "jito-protos", version = "=1.17.34" }
+jito-tip-distribution = { path = "jito-programs/mev-programs/programs/tip-distribution", features = ["no-entrypoint"] }
+jito-tip-payment = { path = "jito-programs/mev-programs/programs/tip-payment", features = ["no-entrypoint"] }
json5 = "0.4.1"
jsonrpc-core = "18.0.0"
jsonrpc-core-client = "18.0.0"
@@ -310,6 +320,7 @@ solana-bench-tps = { path = "bench-tps", version = "=1.17.34" }
solana-bloom = { path = "bloom", version = "=1.17.34" }
solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=1.17.34" }
solana-bucket-map = { path = "bucket_map", version = "=1.17.34" }
+solana-bundle = { path = "bundle", version = "=1.17.34" }
solana-connection-cache = { path = "connection-cache", version = "=1.17.34", default-features = false }
solana-clap-utils = { path = "clap-utils", version = "=1.17.34" }
solana-clap-v3-utils = { path = "clap-v3-utils", version = "=1.17.34" }
@@ -354,6 +365,7 @@ solana-rpc-client = { path = "rpc-client", version = "=1.17.34", default-feature
solana-rpc-client-api = { path = "rpc-client-api", version = "=1.17.34" }
solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=1.17.34" }
solana-runtime = { path = "runtime", version = "=1.17.34" }
+solana-runtime-plugin = { path = "runtime-plugin", version = "=1.17.34" }
solana-sdk = { path = "sdk", version = "=1.17.34" }
solana-sdk-macro = { path = "sdk/macro", version = "=1.17.34" }
solana-send-transaction-service = { path = "send-transaction-service", version = "=1.17.34" }
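
The entries added to the workspace dependency table above let member crates pull these crates in by reference. A hedged sketch of what a member crate's manifest might contain (hypothetical; the member manifests themselves are not shown in this diff):

```toml
# In a workspace member's Cargo.toml: inherit path and version
# from the root [workspace.dependencies] table.
[dependencies]
jito-protos = { workspace = true }
jito-tip-distribution = { workspace = true }
solana-bundle = { workspace = true }
```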
diff --git a/README.md b/README.md
index 4fccacf2ba0672..750e7978959df6 100644
--- a/README.md
+++ b/README.md
@@ -4,142 +4,9 @@
-[![Solana crate](https://img.shields.io/crates/v/solana-core.svg)](https://crates.io/crates/solana-core)
-[![Solana documentation](https://docs.rs/solana-core/badge.svg)](https://docs.rs/solana-core)
-[![Build status](https://badge.buildkite.com/8cc350de251d61483db98bdfc895b9ea0ac8ffa4a32ee850ed.svg?branch=master)](https://buildkite.com/solana-labs/solana/builds?branch=master)
-[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
+[![Build status](https://badge.buildkite.com/3a7c88c0f777e1a0fddacc190823565271ae4c251ef78d83a8.svg)](https://buildkite.com/jito/jito-solana)
-# Building
+# About
+This repository contains Jito's fork of the Solana validator.
-## **1. Install rustc, cargo and rustfmt.**
-
-```bash
-$ curl https://sh.rustup.rs -sSf | sh
-$ source $HOME/.cargo/env
-$ rustup component add rustfmt
-```
-
-When building the master branch, please make sure you are using the latest stable rust version by running:
-
-```bash
-$ rustup update
-```
-
-When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
-```bash
-$ rustup install VERSION
-```
-Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.
-
-On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc.
-
-On Ubuntu:
-```bash
-$ sudo apt-get update
-$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make libprotobuf-dev protobuf-compiler
-```
-
-On Fedora:
-```bash
-$ sudo dnf install openssl-devel systemd-devel pkg-config zlib-devel llvm clang cmake make protobuf-devel protobuf-compiler perl-core
-```
-
-## **2. Download the source code.**
-
-```bash
-$ git clone https://github.com/solana-labs/solana.git
-$ cd solana
-```
-
-## **3. Build.**
-
-```bash
-$ ./cargo build
-```
-
-# Testing
-
-**Run the test suite:**
-
-```bash
-$ ./cargo test
-```
-
-### Starting a local testnet
-Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps).
-
-### Accessing the remote development cluster
-* `devnet` - stable public cluster for development accessible via
-devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)
-
-# Benchmarking
-
-First, install the nightly build of rustc. `cargo bench` requires the use of the
-unstable features only available in the nightly build.
-
-```bash
-$ rustup install nightly
-```
-
-Run the benchmarks:
-
-```bash
-$ cargo +nightly bench
-```
-
-# Release Process
-
-The release process for this project is described [here](RELEASE.md).
-
-# Code coverage
-
-To generate code coverage statistics:
-
-```bash
-$ scripts/coverage.sh
-$ open target/cov/lcov-local/index.html
-```
-
-Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
-productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
-some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
-the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
-test *protects* your solution from future changes. Say you don't understand why a line of code exists,
-try deleting it and running the unit-tests. The nearest test failure should tell you what problem
-was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
-problem is solved by this code?" On the other hand, if a test does fail and you can think of a
-better way to solve the same problem, a Pull Request with your solution would most certainly be
-welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
-send us that patch!
-
-# Disclaimer
-
-All claims, content, designs, algorithms, estimates, roadmaps,
-specifications, and performance measurements described in this project
-are done with the Solana Labs, Inc. (“SL”) good faith efforts. It is up to
-the reader to check and validate their accuracy and truthfulness.
-Furthermore, nothing in this project constitutes a solicitation for
-investment.
-
-Any content produced by SL or developer resources that SL provides are
-for educational and inspirational purposes only. SL does not encourage,
-induce or sanction the deployment, integration or use of any such
-applications (including the code comprising the Solana blockchain
-protocol) in violation of applicable laws or regulations and hereby
-prohibits any such deployment, integration or use. This includes the use of
-any such applications by the reader (a) in violation of export control
-or sanctions laws of the United States or any other applicable
-jurisdiction, (b) if the reader is located in or ordinarily resident in
-a country or territory subject to comprehensive sanctions administered
-by the U.S. Office of Foreign Assets Control (OFAC), or (c) if the
-reader is or is working on behalf of a Specially Designated National
-(SDN) or a person subject to similar blocking or denied party
-prohibitions.
-
-The reader should be aware that U.S. export control and sanctions laws prohibit
-U.S. persons (and other persons that are subject to such laws) from transacting
-with persons in certain countries and territories or that are on the SDN list.
-Accordingly, there is a risk to individuals that other persons using any of the
-code contained in this repo, or a derivation thereof, may be sanctioned persons
-and that transactions with such persons would be a violation of U.S. export
-controls and sanctions law.
+We recommend checking out our [Gitbook](https://jito-foundation.gitbook.io/mev/jito-solana/building-the-software) for more detailed instructions on building and running Jito-Solana.
diff --git a/RELEASE.md b/RELEASE.md
index c5aa5d540b1191..5c32ff423e2c32 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -107,7 +107,7 @@ Alternatively use the Github UI.
### Create the Release Tag on GitHub
-1. Go to [GitHub Releases](https://github.com/solana-labs/solana/releases) for tagging a release.
+1. Go to [GitHub Releases](https://github.com/jito-foundation/jito-solana/releases) for tagging a release.
1. Click "Draft new release". The release tag must exactly match the `version`
field in `/Cargo.toml` prefixed by `v`.
1. If the Cargo.toml version field is **0.12.3**, then the release tag must be **v0.12.3**
@@ -115,7 +115,7 @@ Alternatively use the Github UI.
1. If you want to release v0.12.0, the target branch must be v0.12
1. Fill the release notes.
1. If this is the first release on the branch (e.g. v0.13.**0**), paste in [this
- template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md). Engineering Lead can provide summary contents for release notes if needed.
+ template](https://raw.githubusercontent.com/jito-foundation/jito-solana/master/.github/RELEASE_TEMPLATE.md). Engineering Lead can provide summary contents for release notes if needed.
1. If this is a patch release, review all the commits since the previous release on this branch and add details as needed.
1. Click "Save Draft", then confirm the release notes look good and the tag name and branch are correct.
1. Ensure all desired commits (usually backports) are landed on the branch by now.
@@ -126,16 +126,16 @@ Alternatively use the Github UI.
### Update release branch with the next patch version
-[This action](https://github.com/solana-labs/solana/blob/master/.github/workflows/increment-cargo-version-on-release.yml) ensures that publishing a release will trigger the creation of a PR to update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1). Ensure that the created PR makes it through CI and gets submitted.
+[This action](https://github.com/jito-foundation/jito-solana/blob/master/.github/workflows/increment-cargo-version-on-release.yml) ensures that publishing a release will trigger the creation of a PR to update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1). Ensure that the created PR makes it through CI and gets submitted.
### Prepare for the next release
-1. Go to [GitHub Releases](https://github.com/solana-labs/solana/releases) and create a new draft release for `X.Y.Z+1` with empty release notes. This allows people to incrementally add new release notes until it's time for the next release
+1. Go to [GitHub Releases](https://github.com/jito-foundation/jito-solana/releases) and create a new draft release for `X.Y.Z+1` with empty release notes. This allows people to incrementally add new release notes until it's time for the next release
1. Also, point the branch field to the same branch and mark the release as **"This is a pre-release"**.
-1. Go to the [Github Milestones](https://github.com/solana-labs/solana/milestones). Create a new milestone for the `X.Y.Z+1`, move over
+1. Go to the [Github Milestones](https://github.com/jito-foundation/jito-solana/milestones). Create a new milestone for the `X.Y.Z+1`, move over
unresolved issues still in the `X.Y.Z` milestone, then close the `X.Y.Z` milestone.
### Verify release automation success
-Go to [Solana Releases](https://github.com/solana-labs/solana/releases) and click on the latest release that you just published.
+Go to [Solana Releases](https://github.com/jito-foundation/jito-solana/releases) and click on the latest release that you just published.
Verify that all of the build artifacts are present, then uncheck **"This is a pre-release"** for the release.
Build artifacts can take up to 60 minutes after creating the tag before
diff --git a/SECURITY.md b/SECURITY.md
deleted file mode 100644
index 905316c2dc3da4..00000000000000
--- a/SECURITY.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Security Policy
-
-1. [Reporting security problems](#reporting)
-4. [Security Bug Bounties](#bounty)
-2. [Incident Response Process](#process)
-
-
-## Reporting security problems in the Solana Labs Validator Client
-
-**DO NOT CREATE A GITHUB ISSUE** to report a security problem.
-
-Instead please use this [Report a Vulnerability](https://github.com/solana-labs/solana/security/advisories/new) link.
-Provide a helpful title, detailed description of the vulnerability and an exploit
-proof-of-concept. Speculative submissions without proof-of-concept will be closed
-with no further consideration.
-
-If you haven't done so already, please **enable two-factor auth** in your GitHub account.
-
-Expect a response as fast as possible in the advisory, typically within 72 hours.
-
---
-
-If you do not receive a response in the advisory, send an email to
-security@solanalabs.com with the full URL of the advisory you have created. DO NOT
-include attachments or provide detail sufficient for exploitation regarding the
-security issue in this email. **Only provide such details in the advisory**.
-
-If you do not receive a response from security@solanalabs.com please followup with
-the team directly. You can do this in the `#core-technology` channel of the
-[Solana Tech discord server](https://solana.com/discord), by pinging the `Solana Labs`
-role in the channel and referencing the fact that you submitted a security problem.
-
-
-## Incident Response Process
-
-In case an incident is discovered or reported, the following process will be
-followed to contain, respond and remediate:
-
-### 1. Accept the new report
-In response a newly reported security problem, a member of the
-`solana-labs/admins` group will accept the report to turn it into a draft
-advisory. The `solana-labs/security-incident-response` group should be added to
-the draft security advisory, and create a private fork of the repository (grey
-button towards the bottom of the page) if necessary.
-
-If the advisory is the result of an audit finding, follow the same process as above but add the auditor's github user(s) and begin the title with "[Audit]".
-
-If the report is out of scope, a member of the `solana-labs/admins` group will
-comment as such and then close the report.
-
-### 2. Triage
-Within the draft security advisory, discuss and determine the severity of the issue. If necessary, members of the solana-labs/security-incident-response group may add other github users to the advisory to assist.
-If it is determined that this not a critical network issue then the advisory should be closed and if more follow-up is required a normal Solana public github issue should be created.
-
-### 3. Prepare Fixes
-For the affected branches, typically all three (edge, beta and stable), prepare a fix for the issue and push them to the corresponding branch in the private repository associated with the draft security advisory.
-There is no CI available in the private repository so you must build from source and manually verify fixes.
-Code review from the reporter is ideal, as well as from multiple members of the core development team.
-
-### 4. Notify Security Group Validators
-Once an ETA is available for the fix, a member of the solana-labs/security-incident-response group should notify the validators so they can prepare for an update using the "Solana Red Alert" notification system.
-The teams are all over the world and it's critical to provide actionable information at the right time. Don't be the person that wakes everybody up at 2am when a fix won't be available for hours.
-
-### 5. Ship the patch
-Once the fix is accepted, a member of the solana-labs/security-incident-response group should prepare a single patch file for each affected branch. The commit title for the patch should only contain the advisory id, and not disclose any further details about the incident.
-Copy the patches to https://release.solana.com/ under a subdirectory named after the advisory id (example: https://release.solana.com/GHSA-hx59-f5g4-jghh/v1.4.patch). Contact a member of the solana-labs/admins group if you require access to release.solana.com
-Using the "Solana Red Alert" channel:
- a) Notify validators that there's an issue and a patch will be provided in X minutes
- b) If X minutes expires and there's no patch, notify of the delay and provide a new ETA
- c) Provide links to patches of https://release.solana.com/ for each affected branch
-Validators can be expected to build the patch from source against the latest release for the affected branch.
-Since the software version will not change after the patch is applied, request that each validator notify in the existing channel once they've updated. Manually monitor the roll out until a sufficient amount of stake has updated - typically at least 33.3% or 66.6% depending on the issue.
-
-### 6. Public Disclosure and Release
-Once the fix has been deployed to the security group validators, the patches from the security advisory may be merged into the main source repository. A new official release for each affected branch should be shipped and all validators requested to upgrade as quickly as possible.
-
-### 7. Security Advisory Bounty Accounting and Cleanup
-If this issue is [eligible](#eligibility) for a bounty, prefix the title of the
-security advisory with one of the following, depending on the severity:
-- [Bounty Category: Critical: Loss of Funds]
-- [Bounty Category: Critical: Consensus / Safety Violations]
-- [Bounty Category: Critical: Liveness / Loss of Availability]
-- [Bounty Category: Critical: DoS Attacks]
-- [Bounty Category: Supply Chain Attacks]
-- [Bounty Category: RPC]
-
-Confirm with the reporter that they agree with the severity assessment, and discuss as required to reach a conclusion.
-
-We currently do not use the Github workflow to publish security advisories. Once the issue and fix have been disclosed, and a bounty category is assessed if appropriate, the GitHub security advisory is no longer needed and can be closed.
-
-
-## Security Bug Bounties
-At its sole discretion, the Solana Foundation may offer a bounty for
-[valid reports](#reporting) of critical Solana vulnerabilities. Please see below
-for more details. The submitter is not required to provide a
-mitigation to qualify.
-
-#### Loss of Funds:
-$2,000,000 USD in locked SOL tokens (locked for 12 months)
-* Theft of funds without users signature from any account
-* Theft of funds without users interaction in system, token, stake, vote programs
-* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes.
-
-#### Consensus/Safety Violations:
-$1,000,000 USD in locked SOL tokens (locked for 12 months)
-* Consensus safety violation
-* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc.
-
-#### Liveness / Loss of Availability:
-$400,000 USD in locked SOL tokens (locked for 12 months)
-* Whereby consensus halts and requires human intervention
-* Eclipse attacks,
-* Remote attacks that partition the network,
-
-#### DoS Attacks:
-$100,000 USD in locked SOL tokens (locked for 12 months)
-* Remote resource exaustion via Non-RPC protocols
-
-#### Supply Chain Attacks:
-$100,000 USD in locked SOL tokens (locked for 12 months)
-* Non-social attacks against source code change management, automated testing, release build, release publication and release hosting infrastructure of the monorepo.
-
-#### RPC DoS/Crashes:
-$5,000 USD in locked SOL tokens (locked for 12 months)
-* RPC attacks
-
-### Out of Scope:
-The following components are out of scope for the bounty program
-* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com
-* Any encrypted credentials, auth tokens, etc. checked into the repo
-* Bugs in dependencies. Please take them upstream!
-* Attacks that require social engineering
-* Any undeveloped automated tooling (scanners, etc) results. (OK with developed PoC)
-* Any asset whose source code does not exist in this repository (including, but not limited
-to, any and all web properties not explicitly listed on this page)
-
-### Eligibility:
-* Submissions _MUST_ include an exploit proof-of-concept to be considered eligible
-* The participant submitting the bug report shall follow the process outlined within this document
-* Valid exploits can be eligible even if they are not successfully executed on a public cluster
-* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
-* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.foundation/kyc. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
-
-### Duplicate Reports
-Compensation for duplicative reports will be split among reporters with first to report taking priority using the following equation
-```
-R: total reports
-ri: report priority
-bi: bounty share
-
-bi = 2 ^ (R - ri) / ((2^R) - 1)
-```
-#### Bounty Split Examples
-| total reports | priority | share | | total reports | priority | share | | total reports | priority | share |
-| ------------- | -------- | -----: | - | ------------- | -------- | -----: | - | ------------- | -------- | -----: |
-| 1 | 1 | 100% | | 2 | 1 | 66.67% | | 5 | 1 | 51.61% |
-| | | | | 2 | 2 | 33.33% | | 5 | 2 | 25.81% |
-| 4 | 1 | 53.33% | | | | | | 5 | 3 | 12.90% |
-| 4 | 2 | 26.67% | | 3 | 1 | 57.14% | | 5 | 4 | 6.45% |
-| 4 | 3 | 13.33% | | 3 | 2 | 28.57% | | 5 | 5 | 3.23% |
-| 4 | 4 | 6.67% | | 3 | 3 | 14.29% | | | | |
-
-### Payment of Bug Bounties:
-* Bounties are currently awarded on a rolling/weekly basis and paid out within 30 days upon receipt of an invoice.
-* The SOL/USD conversion rate used for payments is the market price of SOL (denominated in USD) at the end of the day the invoice is submitted by the researcher.
-* The reference for this price is the Closing Price given by Coingecko.com on that date given here: https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
-* Bug bounties that are paid out in SOL are paid to stake accounts with a lockup expiring 12 months from the date of delivery of SOL.
diff --git a/accounts-db/src/account_overrides.rs b/accounts-db/src/account_overrides.rs
index ee8e7ec9e21f94..d5d3286426008b 100644
--- a/accounts-db/src/account_overrides.rs
+++ b/accounts-db/src/account_overrides.rs
@@ -4,12 +4,16 @@ use {
};
/// Encapsulates overridden accounts, typically used for transaction simulations
-#[derive(Default)]
+#[derive(Clone, Default)]
pub struct AccountOverrides {
accounts: HashMap<Pubkey, AccountSharedData>,
}
impl AccountOverrides {
+ pub fn upsert_account_overrides(&mut self, other: AccountOverrides) {
+ self.accounts.extend(other.accounts);
+ }
+
pub fn set_account(&mut self, pubkey: &Pubkey, account: Option<AccountSharedData>) {
match account {
Some(account) => self.accounts.insert(*pubkey, account),
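
Deriving `Clone` and adding `upsert_account_overrides` lets bundle simulation layer freshly computed account state on top of an existing override set. Because `accounts` is a `HashMap`, the merge is just `extend`, with entries from `other` winning on key collisions. A minimal sketch of that merge semantics, using stand-in types rather than the real `Pubkey`/`AccountSharedData`:

```rust
use std::collections::HashMap;

// Simplified stand-ins for Pubkey/AccountSharedData, for illustration only.
type Pubkey = [u8; 32];
type Account = Vec<u8>;

#[derive(Clone, Default)]
struct AccountOverrides {
    accounts: HashMap<Pubkey, Account>,
}

impl AccountOverrides {
    // Mirrors the new upsert: entries from `other` replace existing ones.
    fn upsert_account_overrides(&mut self, other: AccountOverrides) {
        self.accounts.extend(other.accounts);
    }
}

fn main() {
    let (a, b) = ([1u8; 32], [2u8; 32]);
    let mut base = AccountOverrides::default();
    base.accounts.insert(a, vec![0]);

    let mut newer = AccountOverrides::default();
    newer.accounts.insert(a, vec![9]); // collides with `base`
    newer.accounts.insert(b, vec![7]);

    base.upsert_account_overrides(newer);
    assert_eq!(base.accounts[&a], vec![9]); // the newer state wins
    assert_eq!(base.accounts.len(), 2);
}
```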
diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 47b372d981843a..3f8a1d677a21b0 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -1173,19 +1173,24 @@ impl Accounts {
}
fn lock_account(
- &self,
account_locks: &mut AccountLocks,
writable_keys: Vec<&Pubkey>,
readonly_keys: Vec<&Pubkey>,
+ additional_read_locks: &HashSet<Pubkey>,
+ additional_write_locks: &HashSet<Pubkey>,
) -> Result<()> {
for k in writable_keys.iter() {
- if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) {
+ if account_locks.is_locked_write(k)
+ || account_locks.is_locked_readonly(k)
+ || additional_write_locks.contains(k)
+ || additional_read_locks.contains(k)
+ {
debug!("Writable account in use: {:?}", k);
return Err(TransactionError::AccountInUse);
}
}
for k in readonly_keys.iter() {
- if account_locks.is_locked_write(k) {
+ if account_locks.is_locked_write(k) || additional_write_locks.contains(k) {
debug!("Read-only account in use: {:?}", k);
return Err(TransactionError::AccountInUse);
}
@@ -1230,7 +1235,11 @@ impl Accounts {
let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs
.map(|tx| tx.get_account_locks(tx_account_lock_limit))
.collect();
- self.lock_accounts_inner(tx_account_locks_results)
+ self.lock_accounts_inner(
+ tx_account_locks_results,
+ &HashSet::default(),
+ &HashSet::default(),
+ )
}
#[must_use]
@@ -1240,6 +1249,8 @@ impl Accounts {
txs: impl Iterator<Item = &'a SanitizedTransaction>,
results: impl Iterator<Item = Result<()>>,
tx_account_lock_limit: usize,
+ additional_read_locks: &HashSet<Pubkey>,
+ additional_write_locks: &HashSet<Pubkey>,
) -> Vec<Result<()>> {
let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs
.zip(results)
@@ -1248,22 +1259,30 @@ impl Accounts {
Err(err) => Err(err),
})
.collect();
- self.lock_accounts_inner(tx_account_locks_results)
+ self.lock_accounts_inner(
+ tx_account_locks_results,
+ additional_read_locks,
+ additional_write_locks,
+ )
}
#[must_use]
fn lock_accounts_inner(
&self,
tx_account_locks_results: Vec<Result<TransactionAccountLocks>>,
+ additional_read_locks: &HashSet<Pubkey>,
+ additional_write_locks: &HashSet<Pubkey>,
) -> Vec<Result<()>> {
let account_locks = &mut self.account_locks.lock().unwrap();
tx_account_locks_results
.into_iter()
.map(|tx_account_locks_result| match tx_account_locks_result {
- Ok(tx_account_locks) => self.lock_account(
+ Ok(tx_account_locks) => Self::lock_account(
account_locks,
tx_account_locks.writable,
tx_account_locks.readonly,
+ additional_read_locks,
+ additional_write_locks,
),
Err(err) => Err(err),
})
@@ -1313,7 +1332,7 @@ impl Accounts {
lamports_per_signature: u64,
include_slot_in_hash: IncludeSlotInHash,
) {
- let (accounts_to_store, transactions) = self.collect_accounts_to_store(
+ let (accounts_to_store, transactions) = Self::collect_accounts_to_store(
txs,
res,
loaded,
@@ -1340,8 +1359,7 @@ impl Accounts {
}
#[allow(clippy::too_many_arguments)]
- fn collect_accounts_to_store<'a>(
- &self,
+ pub fn collect_accounts_to_store<'a>(
txs: &'a [SanitizedTransaction],
execution_results: &'a [TransactionExecutionResult],
load_results: &'a mut [TransactionLoadResult],
@@ -1410,6 +1428,55 @@ impl Accounts {
}
(accounts, transactions)
}
+
+ #[must_use]
+ fn lock_accounts_sequential_inner(
+ &self,
+ tx_account_locks_results: Vec<Result<TransactionAccountLocks>>,
+ ) -> Vec<Result<()>> {
+ let mut l_account_locks = self.account_locks.lock().unwrap();
+ Self::lock_accounts_sequential(&mut l_account_locks, tx_account_locks_results)
+ }
+
+ pub fn lock_accounts_sequential(
+ account_locks: &mut AccountLocks,
+ tx_account_locks_results: Vec<Result<TransactionAccountLocks>>,
+ ) -> Vec<Result<()>> {
+ let mut account_in_use_set = false;
+ tx_account_locks_results
+ .into_iter()
+ .map(|tx_account_locks_result| match tx_account_locks_result {
+ Ok(tx_account_locks) => match account_in_use_set {
+ true => Err(TransactionError::AccountInUse),
+ false => {
+ let locked = Self::lock_account(
+ account_locks,
+ tx_account_locks.writable,
+ tx_account_locks.readonly,
+ &HashSet::default(),
+ &HashSet::default(),
+ );
+ if matches!(locked, Err(TransactionError::AccountInUse)) {
+ account_in_use_set = true;
+ }
+ locked
+ }
+ },
+ Err(err) => Err(err),
+ })
+ .collect()
+ }
+
+ pub fn lock_accounts_sequential_with_results<'a>(
+ &self,
+ txs: impl Iterator<Item = &'a SanitizedTransaction>,
+ tx_account_lock_limit: usize,
+ ) -> Vec<Result<()>> {
+ let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs
+ .map(|tx| tx.get_account_locks(tx_account_lock_limit))
+ .collect();
+ self.lock_accounts_sequential_inner(tx_account_locks_results)
+ }
}
fn prepare_if_nonce_account(
@@ -1498,6 +1565,7 @@ mod tests {
sync::atomic::{AtomicBool, AtomicU64, Ordering},
thread, time,
},
+ Accounts,
};
fn new_sanitized_tx(
@@ -3171,6 +3239,8 @@ mod tests {
txs.iter(),
qos_results.into_iter(),
MAX_TX_ACCOUNT_LOCKS,
+ &HashSet::default(),
+ &HashSet::default(),
);
assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times
@@ -3292,7 +3362,7 @@ mod tests {
}
let txs = vec![tx0.clone(), tx1.clone()];
let execution_results = vec![new_execution_result(Ok(()), None); 2];
- let (collected_accounts, transactions) = accounts.collect_accounts_to_store(
+ let (collected_accounts, transactions) = Accounts::collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
@@ -3756,7 +3826,7 @@ mod tests {
let mut loaded = vec![loaded];
let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
- let accounts = Accounts::new_with_config_for_tests(
+ let _accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
@@ -3770,7 +3840,7 @@ mod tests {
)),
nonce.as_ref(),
)];
- let (collected_accounts, _) = accounts.collect_accounts_to_store(
+ let (collected_accounts, _) = Accounts::collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
@@ -3869,7 +3939,7 @@ mod tests {
let mut loaded = vec![loaded];
let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
- let accounts = Accounts::new_with_config_for_tests(
+ let _accounts = Accounts::new_with_config_for_tests(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
@@ -3883,7 +3953,7 @@ mod tests {
)),
nonce.as_ref(),
)];
- let (collected_accounts, _) = accounts.collect_accounts_to_store(
+ let (collected_accounts, _) = Accounts::collect_accounts_to_store(
&txs,
&execution_results,
loaded.as_mut_slice(),
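
Two locking changes are threaded through `Accounts` here: `lock_account` now also consults caller-supplied `additional_read_locks`/`additional_write_locks` (presumably the accounts held by in-flight bundles, via the `BundleAccountLocker` imported below), and the new `lock_accounts_sequential` grants locks strictly in transaction order, reporting every transaction after the first `AccountInUse` as `AccountInUse` too. A hedged sketch of that sequential rule, with stand-in types instead of the real lock machinery:

```rust
// Once one transaction in the bundle fails to take its locks, every later
// transaction is also reported as AccountInUse, so each execution batch is
// always a prefix-ordered chunk of the bundle.
#[derive(Clone, Debug, PartialEq)]
enum LockOutcome {
    Locked,
    AccountInUse,
}

// `attempts` stands in for the per-transaction outcome of Accounts::lock_account.
fn lock_sequential(attempts: Vec<LockOutcome>) -> Vec<LockOutcome> {
    let mut account_in_use_set = false; // mirrors the flag in lock_accounts_sequential
    attempts
        .into_iter()
        .map(|attempt| {
            if account_in_use_set {
                LockOutcome::AccountInUse
            } else {
                if attempt == LockOutcome::AccountInUse {
                    account_in_use_set = true;
                }
                attempt
            }
        })
        .collect()
}

fn main() {
    use LockOutcome::*;
    // The third transaction conflicts, so the fourth is rejected as well,
    // even if its accounts are free.
    assert_eq!(
        lock_sequential(vec![Locked, Locked, AccountInUse, Locked]),
        vec![Locked, Locked, AccountInUse, AccountInUse]
    );
}
```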
diff --git a/anchor b/anchor
new file mode 160000
index 00000000000000..4f52f41cbeafb7
--- /dev/null
+++ b/anchor
@@ -0,0 +1 @@
+Subproject commit 4f52f41cbeafb77d85c7b712516dfbeb5b86dd5f
diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs
index 524928aee7c06d..80418326f5aa67 100644
--- a/banking-bench/src/main.rs
+++ b/banking-bench/src/main.rs
@@ -9,6 +9,7 @@ use {
solana_core::{
banking_stage::BankingStage,
banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
},
solana_gossip::cluster_info::{ClusterInfo, Node},
solana_ledger::{
@@ -36,6 +37,7 @@ use {
solana_streamer::socket::SocketAddrSpace,
solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE,
std::{
+ collections::HashSet,
sync::{atomic::Ordering, Arc, RwLock},
thread::sleep,
time::{Duration, Instant},
@@ -57,9 +59,15 @@ fn check_txs(
let now = Instant::now();
let mut no_bank = false;
loop {
- if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10))
+ if let Ok(WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }) = receiver.recv_timeout(Duration::from_millis(10))
{
- total += entry.transactions.len();
+ total += entries_ticks
+ .iter()
+ .map(|e| e.0.transactions.len())
+ .sum::<usize>();
}
if total >= ref_tx_count {
break;
@@ -463,6 +471,8 @@ fn main() {
Arc::new(connection_cache),
bank_forks.clone(),
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// This is so that the signal_receiver does not go out of scope after the closure.
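
The `check_txs` change reflects a reshaped `WorkingBankEntry`: instead of a `(bank, (entry, tick_height))` tuple, the receiver now yields a bank plus a vector of `(entry, tick_height)` pairs, so the benchmark sums transactions across all entries. A small sketch of the new tally, with a stand-in `Entry` type rather than the real solana_entry type:

```rust
struct Entry {
    transactions: Vec<u8>, // placeholder for Vec<VersionedTransaction>
}

// Mirrors the new loop body: count transactions across every entry in the batch.
fn count_txs(entries_ticks: &[(Entry, u64)]) -> usize {
    entries_ticks
        .iter()
        .map(|(entry, _tick_height)| entry.transactions.len())
        .sum()
}

fn main() {
    let batch = vec![
        (Entry { transactions: vec![0u8; 3] }, 10),
        (Entry { transactions: vec![0u8; 2] }, 11),
    ];
    assert_eq!(count_txs(&batch), 5); // 3 + 2 across both entries
}
```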
diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml
index 1404d88b5cde4e..94f2531cec4b22 100644
--- a/banks-server/Cargo.toml
+++ b/banks-server/Cargo.toml
@@ -16,6 +16,7 @@ futures = { workspace = true }
solana-accounts-db = { workspace = true }
solana-banks-interface = { workspace = true }
solana-client = { workspace = true }
+solana-gossip = { workspace = true }
solana-runtime = { workspace = true }
solana-sdk = { workspace = true }
solana-send-transaction-service = { workspace = true }
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs
index a04d542108e923..e9e09ee6a42f34 100644
--- a/banks-server/src/banks_server.rs
+++ b/banks-server/src/banks_server.rs
@@ -9,6 +9,7 @@ use {
TransactionSimulationDetails, TransactionStatus,
},
solana_client::connection_cache::ConnectionCache,
+ solana_gossip::cluster_info::ClusterInfo,
solana_runtime::{
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
@@ -438,7 +439,7 @@ pub async fn start_local_server(
pub async fn start_tcp_server(
listen_addr: SocketAddr,
- tpu_addr: SocketAddr,
+ cluster_info: Arc<ClusterInfo>,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
connection_cache: Arc<ConnectionCache>,
@@ -463,7 +464,7 @@ pub async fn start_tcp_server(
let (sender, receiver) = unbounded();
SendTransactionService::new::<NullTpuInfo>(
- tpu_addr,
+ cluster_info.clone(),
&bank_forks,
None,
receiver,
diff --git a/bootstrap b/bootstrap
new file mode 100755
index 00000000000000..d9b1eed6f43916
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+set -eu
+
+BANK_HASH=$(cargo run --release --bin solana-ledger-tool -- -l config/bootstrap-validator bank-hash)
+
+# increase max file handle limit
+ulimit -Hn 1000000
+
+# if above fails, run:
+# sudo bash -c 'echo "* hard nofile 1000000" >> /etc/security/limits.conf'
+
+# NOTE: make sure tip-payment and tip-distribution program are deployed using the correct pubkeys
+RUST_LOG=INFO,solana_core::bundle_stage=DEBUG \
+ NDEBUG=1 ./multinode-demo/bootstrap-validator.sh \
+ --wait-for-supermajority 0 \
+ --expected-bank-hash "$BANK_HASH" \
+ --block-engine-url http://127.0.0.1 \
+ --relayer-url http://127.0.0.1:11226 \
+ --rpc-pubsub-enable-block-subscription \
+ --enable-rpc-transaction-history \
+ --tip-payment-program-pubkey T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt \
+ --tip-distribution-program-pubkey 4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7 \
+ --commission-bps 0 \
+ --shred-receiver-address 127.0.0.1:1002 \
+ --trust-relayer-packets \
+ --trust-block-engine-packets
diff --git a/bundle/Cargo.toml b/bundle/Cargo.toml
new file mode 100644
index 00000000000000..babb13bcd78a26
--- /dev/null
+++ b/bundle/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "solana-bundle"
+description = "Library related to handling bundles"
+documentation = "https://docs.rs/solana-bundle"
+readme = "../README.md"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+anchor-lang = { workspace = true }
+itertools = { workspace = true }
+log = { workspace = true }
+serde = { workspace = true }
+solana-accounts-db = { workspace = true }
+solana-ledger = { workspace = true }
+solana-logger = { workspace = true }
+solana-measure = { workspace = true }
+solana-poh = { workspace = true }
+solana-program-runtime = { workspace = true }
+solana-runtime = { workspace = true }
+solana-sdk = { workspace = true }
+solana-transaction-status = { workspace = true }
+thiserror = { workspace = true }
+
+[dev-dependencies]
+assert_matches = { workspace = true }
+solana-logger = { workspace = true }
+
+[lib]
+crate-type = ["lib"]
+name = "solana_bundle"
diff --git a/bundle/src/bundle_execution.rs b/bundle/src/bundle_execution.rs
new file mode 100644
index 00000000000000..b3a0d51a41f737
--- /dev/null
+++ b/bundle/src/bundle_execution.rs
@@ -0,0 +1,1199 @@
+use {
+ itertools::izip,
+ log::*,
+ solana_accounts_db::{
+ account_overrides::AccountOverrides, accounts::TransactionLoadResult,
+ transaction_results::TransactionExecutionResult,
+ },
+ solana_ledger::token_balances::collect_token_balances,
+ solana_measure::{measure::Measure, measure_us},
+ solana_program_runtime::timings::ExecuteTimings,
+ solana_runtime::{
+ bank::{Bank, LoadAndExecuteTransactionsOutput, TransactionBalances},
+ transaction_batch::TransactionBatch,
+ },
+ solana_sdk::{
+ account::AccountSharedData,
+ bundle::SanitizedBundle,
+ pubkey::Pubkey,
+ saturating_add_assign,
+ signature::Signature,
+ transaction::{SanitizedTransaction, TransactionError, VersionedTransaction},
+ },
+ solana_transaction_status::{token_balances::TransactionTokenBalances, PreBalanceInfo},
+ std::{
+ cmp::{max, min},
+ time::{Duration, Instant},
+ },
+ thiserror::Error,
+};
+
+#[derive(Clone, Default)]
+pub struct BundleExecutionMetrics {
+ pub num_retries: u64,
+ pub collect_balances_us: u64,
+ pub load_execute_us: u64,
+ pub collect_pre_post_accounts_us: u64,
+ pub cache_accounts_us: u64,
+ pub execute_timings: ExecuteTimings,
+}
+
+/// Contains the results from executing each TransactionBatch with a final result associated with it
+/// Note that if !result.is_ok(), bundle_transaction_results will not contain the output for every transaction.
+pub struct LoadAndExecuteBundleOutput<'a> {
+ bundle_transaction_results: Vec<BundleTransactionsOutput<'a>>,
+ result: LoadAndExecuteBundleResult<()>,
+ metrics: BundleExecutionMetrics,
+}
+
+impl<'a> LoadAndExecuteBundleOutput<'a> {
+ pub fn executed_ok(&self) -> bool {
+ self.result.is_ok()
+ }
+
+ pub fn result(&self) -> &LoadAndExecuteBundleResult<()> {
+ &self.result
+ }
+
+ pub fn bundle_transaction_results_mut(&mut self) -> &'a mut [BundleTransactionsOutput] {
+ &mut self.bundle_transaction_results
+ }
+
+ pub fn bundle_transaction_results(&self) -> &'a [BundleTransactionsOutput] {
+ &self.bundle_transaction_results
+ }
+
+ pub fn executed_transaction_batches(&self) -> Vec<Vec<VersionedTransaction>> {
+ self.bundle_transaction_results
+ .iter()
+ .map(|br| br.executed_versioned_transactions())
+ .collect()
+ }
+
+ pub fn metrics(&self) -> BundleExecutionMetrics {
+ self.metrics.clone()
+ }
+}
+
+#[derive(Clone, Debug, Error)]
+pub enum LoadAndExecuteBundleError {
+ #[error("Bundle execution timed out")]
+ ProcessingTimeExceeded(Duration),
+
+ #[error(
+ "A transaction in the bundle encountered a lock error: [signature={:?}, transaction_error={:?}]",
+ signature,
+ transaction_error
+ )]
+ LockError {
+ signature: Signature,
+ transaction_error: TransactionError,
+ },
+
+ #[error(
+ "A transaction in the bundle failed to execute: [signature={:?}, execution_result={:?}",
+ signature,
+ execution_result
+ )]
+ TransactionError {
+ signature: Signature,
+ // Box reduces the size between variants in the Error
+ execution_result: Box<TransactionExecutionResult>,
+ },
+
+ #[error("Invalid pre or post accounts")]
+ InvalidPreOrPostAccounts,
+}
+
+pub struct BundleTransactionsOutput<'a> {
+ transactions: &'a [SanitizedTransaction],
+ load_and_execute_transactions_output: LoadAndExecuteTransactionsOutput,
+ pre_balance_info: PreBalanceInfo,
+ post_balance_info: (TransactionBalances, TransactionTokenBalances),
+ // the length of the outer vector should be the same as transactions.len()
+ // for indices that didn't get executed, expect a None.
+ pre_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>,
+ post_tx_execution_accounts: Vec<Option<Vec<(Pubkey, AccountSharedData)>>>,
+}
+
+impl<'a> BundleTransactionsOutput<'a> {
+ pub fn executed_versioned_transactions(&self) -> Vec<VersionedTransaction> {
+ self.transactions
+ .iter()
+ .zip(
+ self.load_and_execute_transactions_output
+ .execution_results
+ .iter(),
+ )
+ .filter_map(|(tx, exec_result)| {
+ exec_result
+ .was_executed()
+ .then_some(tx.to_versioned_transaction())
+ })
+ .collect()
+ }
+
+ pub fn executed_transactions(&self) -> Vec<&'a SanitizedTransaction> {
+ self.transactions
+ .iter()
+ .zip(
+ self.load_and_execute_transactions_output
+ .execution_results
+ .iter(),
+ )
+ .filter_map(|(tx, exec_result)| exec_result.was_executed().then_some(tx))
+ .collect()
+ }
+
+ pub fn load_and_execute_transactions_output(&self) -> &LoadAndExecuteTransactionsOutput {
+ &self.load_and_execute_transactions_output
+ }
+
+ pub fn transactions(&self) -> &[SanitizedTransaction] {
+ self.transactions
+ }
+
+ pub fn loaded_transactions_mut(&mut self) -> &mut [TransactionLoadResult] {
+ &mut self
+ .load_and_execute_transactions_output
+ .loaded_transactions
+ }
+
+ pub fn execution_results(&self) -> &[TransactionExecutionResult] {
+ &self.load_and_execute_transactions_output.execution_results
+ }
+
+ pub fn pre_balance_info(&mut self) -> &mut PreBalanceInfo {
+ &mut self.pre_balance_info
+ }
+
+ pub fn post_balance_info(&self) -> &(TransactionBalances, TransactionTokenBalances) {
+ &self.post_balance_info
+ }
+
+ pub fn pre_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ &self.pre_tx_execution_accounts
+ }
+
+ pub fn post_tx_execution_accounts(&self) -> &Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ &self.post_tx_execution_accounts
+ }
+}
+
+pub type LoadAndExecuteBundleResult<T> = Result<T, LoadAndExecuteBundleError>;
+
+/// Return an Error if a transaction was executed and reverted
+/// NOTE: `execution_results` are zipped with `sanitized_txs` so it's expected a sanitized tx at
+/// position i has a corresponding execution result at position i within the `execution_results`
+/// slice
+pub fn check_bundle_execution_results<'a>(
+ execution_results: &'a [TransactionExecutionResult],
+ sanitized_txs: &'a [SanitizedTransaction],
+) -> Result<(), (&'a SanitizedTransaction, &'a TransactionExecutionResult)> {
+ for (exec_results, sanitized_tx) in execution_results.iter().zip(sanitized_txs) {
+ match exec_results {
+ TransactionExecutionResult::Executed { details, .. } => {
+ if details.status.is_err() {
+ return Err((sanitized_tx, exec_results));
+ }
+ }
+ TransactionExecutionResult::NotExecuted(e) => {
+ if !matches!(e, TransactionError::AccountInUse) {
+ return Err((sanitized_tx, exec_results));
+ }
+ }
+ }
+ }
+ Ok(())
+}
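
For clarity, the acceptance rule encoded above: a transaction that executed must have succeeded, and a transaction that did not execute is tolerated only when the reason is `AccountInUse` (it will be retried in a later batch). A toy restatement with stand-in enums rather than the real `TransactionExecutionResult`/`TransactionError`:

```rust
#[derive(Debug, PartialEq)]
enum TxError {
    AccountInUse,
    Other,
}

enum ExecResult {
    Executed { status: Result<(), TxError> },
    NotExecuted(TxError),
}

fn acceptable(result: &ExecResult) -> bool {
    match result {
        // Executed transactions must have succeeded...
        ExecResult::Executed { status } => status.is_ok(),
        // ...and skipped ones are fine only if they were skipped for lock conflicts.
        ExecResult::NotExecuted(e) => *e == TxError::AccountInUse,
    }
}

fn main() {
    assert!(acceptable(&ExecResult::Executed { status: Ok(()) }));
    assert!(acceptable(&ExecResult::NotExecuted(TxError::AccountInUse)));
    assert!(!acceptable(&ExecResult::Executed { status: Err(TxError::Other) }));
    assert!(!acceptable(&ExecResult::NotExecuted(TxError::Other)));
}
```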
+
+/// Executing a bundle is somewhat complicated compared to executing single transactions. In order to
+/// avoid duplicate logic for execution and simulation, this function can be leveraged.
+///
+/// Assumptions for the caller:
+/// - all transactions were signed properly
+/// - user has deduplicated transactions inside the bundle
+///
+/// TODO (LB):
+/// - given a bundle with 3 transactions that write lock the following accounts: [A, B, C], on failure of B
+/// we should add in the BundleTransactionsOutput of A and C and return the error for B.
+#[allow(clippy::too_many_arguments)]
+pub fn load_and_execute_bundle<'a>(
+ bank: &Bank,
+ bundle: &'a SanitizedBundle,
+ // Max blockhash age
+ max_age: usize,
+ // Upper bound on execution time for a bundle
+ max_processing_time: &Duration,
+ // Execution data logging
+ enable_cpi_recording: bool,
+ enable_log_recording: bool,
+ enable_return_data_recording: bool,
+ enable_balance_recording: bool,
+ log_messages_bytes_limit: &Option<usize>,
+ // simulation will not use the Bank's account locks when building the TransactionBatch
+ // if simulating on an unfrozen bank, this is helpful to avoid stalling replay and use whatever
+ // state the accounts are in at the current time
+ is_simulation: bool,
+ account_overrides: Option<&mut AccountOverrides>,
+ // these must be the same length as the bundle's transactions
+ // allows one to read account state before and after execution of each transaction in the bundle
+ // will use AccountOverrides + Bank
+ pre_execution_accounts: &Vec<Option<Vec<Pubkey>>>,
+ post_execution_accounts: &Vec<Option<Vec<Pubkey>>>,
+) -> LoadAndExecuteBundleOutput<'a> {
+ if pre_execution_accounts.len() != post_execution_accounts.len()
+ || post_execution_accounts.len() != bundle.transactions.len()
+ {
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results: vec![],
+ result: Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts),
+ metrics: BundleExecutionMetrics::default(),
+ };
+ }
+
+ let mut binding = AccountOverrides::default();
+ let account_overrides = account_overrides.unwrap_or(&mut binding);
+ if is_simulation {
+ bundle
+ .transactions
+ .iter()
+ .map(|tx| tx.message().account_keys())
+ .for_each(|account_keys| {
+ account_overrides.upsert_account_overrides(
+ bank.get_account_overrides_for_simulation(&account_keys),
+ );
+ });
+ }
+
+ let mut chunk_start = 0;
+ let start_time = Instant::now();
+
+ let mut bundle_transaction_results = vec![];
+ let mut metrics = BundleExecutionMetrics::default();
+
+ while chunk_start != bundle.transactions.len() {
+ if start_time.elapsed() > *max_processing_time {
+ trace!("bundle: {} took too long to execute", bundle.bundle_id);
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(
+ start_time.elapsed(),
+ )),
+ };
+ }
+
+ let chunk_end = min(bundle.transactions.len(), chunk_start.saturating_add(128));
+ let chunk = &bundle.transactions[chunk_start..chunk_end];
+
+ // Note: these batches are dropped after execution and before record/commit, which is atypical
+ // compared to BankingStage which holds account locks until record + commit to avoid race conditions with
+ // other BankingStage threads. However, the caller of this method, BundleConsumer, will use BundleAccountLocks
+ // to hold RW locks across all transactions in a bundle until it's processed.
+ let batch = if is_simulation {
+ bank.prepare_sequential_sanitized_batch_with_results_for_simulation(chunk)
+ } else {
+ bank.prepare_sequential_sanitized_batch_with_results(chunk)
+ };
+
+ debug!(
+ "bundle: {} batch num locks ok: {}",
+ bundle.bundle_id,
+ batch.lock_results().iter().filter(|lr| lr.is_ok()).count()
+ );
+
+ // Ensures that bundle lock results only return either:
+ // Ok(()) | Err(TransactionError::AccountInUse)
+ // If the error isn't one of those, then error out
+ if let Some((transaction, lock_failure)) = batch.check_bundle_lock_results() {
+ debug!(
+ "bundle: {} lock error; signature: {} error: {}",
+ bundle.bundle_id,
+ transaction.signature(),
+ lock_failure
+ );
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::LockError {
+ signature: *transaction.signature(),
+ transaction_error: lock_failure.clone(),
+ }),
+ };
+ }
+
+ let mut pre_balance_info = PreBalanceInfo::default();
+ let (_, collect_balances_us) = measure_us!({
+ if enable_balance_recording {
+ pre_balance_info.native =
+ bank.collect_balances_with_cache(&batch, Some(account_overrides));
+ pre_balance_info.token = collect_token_balances(
+ bank,
+ &batch,
+ &mut pre_balance_info.mint_decimals,
+ Some(account_overrides),
+ );
+ }
+ });
+ saturating_add_assign!(metrics.collect_balances_us, collect_balances_us);
+
+ let end = min(
+ chunk_start.saturating_add(batch.sanitized_transactions().len()),
+ pre_execution_accounts.len(),
+ );
+
+ let m = Measure::start("accounts");
+ let accounts_requested = &pre_execution_accounts[chunk_start..end];
+ let pre_tx_execution_accounts =
+ get_account_transactions(bank, account_overrides, accounts_requested, &batch);
+ saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us());
+
+ let (mut load_and_execute_transactions_output, load_execute_us) = measure_us!(bank
+ .load_and_execute_transactions(
+ &batch,
+ max_age,
+ enable_cpi_recording,
+ enable_log_recording,
+ enable_return_data_recording,
+ &mut metrics.execute_timings,
+ Some(account_overrides),
+ *log_messages_bytes_limit,
+ true
+ ));
+ debug!(
+ "bundle id: {} loaded_transactions: {:?}",
+ bundle.bundle_id, load_and_execute_transactions_output.loaded_transactions
+ );
+ saturating_add_assign!(metrics.load_execute_us, load_execute_us);
+
+ // All transactions within a bundle are expected to execute and must not fail.
+ // If any transaction executed and failed, or didn't execute due to an
+ // unexpected (non-locking-related) failure, bail out of bundle execution early.
+ if let Err((failing_tx, exec_result)) = check_bundle_execution_results(
+ load_and_execute_transactions_output
+ .execution_results
+ .as_slice(),
+ batch.sanitized_transactions(),
+ ) {
+ // TODO (LB): we should try to return partial results here for successful bundles in a parallel batch.
+ // given a bundle that write locks the following accounts [[A], [B], [C]]
+ // when B fails, we could return the execution results for A and C, but leave B out.
+ // however, if we have bundle that write locks accounts [[A_1], [A_2], [B], [C]] and B fails
+ // we'll get the results for A_1 but not [A_2], [B], [C] due to the way this loop executes.
+ debug!(
+ "bundle: {} execution error; signature: {} error: {:?}",
+ bundle.bundle_id,
+ failing_tx.signature(),
+ exec_result
+ );
+ return LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Err(LoadAndExecuteBundleError::TransactionError {
+ signature: *failing_tx.signature(),
+ execution_result: Box::new(exec_result.clone()),
+ }),
+ };
+ }
+
+ // If none of the transactions were executed, it was most likely due to an AccountInUse
+ // error; retry to ensure that all transactions in the bundle are executed.
+ if !load_and_execute_transactions_output
+ .execution_results
+ .iter()
+ .any(|r| r.was_executed())
+ {
+ saturating_add_assign!(metrics.num_retries, 1);
+ debug!(
+ "bundle: {} no transaction executed, retrying",
+ bundle.bundle_id
+ );
+ continue;
+ }
+
+ // Cache accounts so next iterations of loop can load cached state instead of using
+ // AccountsDB, which will contain stale account state because results aren't committed
+ // to the bank yet.
+ // NOTE: Bank::collect_accounts_to_store does not handle any state changes related to
+ // failed, non-nonce transactions.
+ let m = Measure::start("cache");
+ let accounts = bank.collect_accounts_to_store(
+ batch.sanitized_transactions(),
+ &load_and_execute_transactions_output.execution_results,
+ &mut load_and_execute_transactions_output.loaded_transactions,
+ );
+ for (pubkey, data) in accounts {
+ account_overrides.set_account(pubkey, Some(data.clone()));
+ }
+ saturating_add_assign!(metrics.cache_accounts_us, m.end_as_us());
+
+ let end = max(
+ chunk_start.saturating_add(batch.sanitized_transactions().len()),
+ post_execution_accounts.len(),
+ );
+
+ let m = Measure::start("accounts");
+ let accounts_requested = &post_execution_accounts[chunk_start..end];
+ let post_tx_execution_accounts =
+ get_account_transactions(bank, account_overrides, accounts_requested, &batch);
+ saturating_add_assign!(metrics.collect_pre_post_accounts_us, m.end_as_us());
+
+ let ((post_balances, post_token_balances), collect_balances_us) =
+ measure_us!(if enable_balance_recording {
+ let post_balances =
+ bank.collect_balances_with_cache(&batch, Some(account_overrides));
+ let post_token_balances = collect_token_balances(
+ bank,
+ &batch,
+ &mut pre_balance_info.mint_decimals,
+ Some(account_overrides),
+ );
+ (post_balances, post_token_balances)
+ } else {
+ (
+ TransactionBalances::default(),
+ TransactionTokenBalances::default(),
+ )
+ });
+ saturating_add_assign!(metrics.collect_balances_us, collect_balances_us);
+
+ let processing_end = batch.lock_results().iter().position(|lr| lr.is_err());
+ if let Some(end) = processing_end {
+ chunk_start = chunk_start.saturating_add(end);
+ } else {
+ chunk_start = chunk_end;
+ }
+
+ bundle_transaction_results.push(BundleTransactionsOutput {
+ transactions: chunk,
+ load_and_execute_transactions_output,
+ pre_balance_info,
+ post_balance_info: (post_balances, post_token_balances),
+ pre_tx_execution_accounts,
+ post_tx_execution_accounts,
+ });
+ }
+
+ LoadAndExecuteBundleOutput {
+ bundle_transaction_results,
+ metrics,
+ result: Ok(()),
+ }
+}
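
One detail worth calling out in the loop above: after each batch, `chunk_start` advances to the position of the first lock failure (or to `chunk_end` if every lock succeeded), so the next `TransactionBatch` begins at the first unexecuted transaction. A sketch of that advancement logic, with plain `Result` values standing in for the batch's lock results:

```rust
// Given a bundle [A, B, C] whose transactions conflict pairwise, this yields
// the batches [[A], [B], [C]] across three loop iterations.
fn next_chunk_start(chunk_start: usize, chunk_end: usize, lock_results: &[Result<(), ()>]) -> usize {
    match lock_results.iter().position(|lr| lr.is_err()) {
        Some(first_failure) => chunk_start + first_failure, // resume at the first failed tx
        None => chunk_end,                                  // whole chunk locked; move past it
    }
}

fn main() {
    // Batch of 3 where only the first transaction got its locks:
    assert_eq!(next_chunk_start(0, 3, &[Ok(()), Err(()), Err(())]), 1);
    // All locks succeeded: move past the chunk.
    assert_eq!(next_chunk_start(0, 3, &[Ok(()), Ok(()), Ok(())]), 3);
}
```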
+
+fn get_account_transactions(
+ bank: &Bank,
+ account_overrides: &AccountOverrides,
+ accounts: &[Option<Vec<Pubkey>>],
+ batch: &TransactionBatch,
+) -> Vec<Option<Vec<(Pubkey, AccountSharedData)>>> {
+ let iter = izip!(batch.lock_results().iter(), accounts.iter());
+
+ iter.map(|(lock_result, accounts_requested)| {
+ if lock_result.is_ok() {
+ accounts_requested.as_ref().map(|accounts_requested| {
+ accounts_requested
+ .iter()
+ .map(|a| match account_overrides.get(a) {
+ None => (*a, bank.get_account(a).unwrap_or_default()),
+ Some(data) => (*a, data.clone()),
+ })
+ .collect()
+ })
+ } else {
+ None
+ }
+ })
+ .collect()
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::bundle_execution::{load_and_execute_bundle, LoadAndExecuteBundleError},
+ assert_matches::assert_matches,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{
+ bundle::{derive_bundle_id_from_sanitized_transactions, SanitizedBundle},
+ clock::MAX_PROCESSING_AGE,
+ pubkey::Pubkey,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::{SanitizedTransaction, Transaction, TransactionError},
+ },
+ std::{
+ sync::{Arc, Barrier},
+ thread::{sleep, spawn},
+ time::Duration,
+ },
+ };
+
+ const MAX_PROCESSING_TIME: Duration = Duration::from_secs(1);
+ const LOG_MESSAGE_BYTES_LIMITS: Option<usize> = Some(100_000);
+ const MINT_AMOUNT_LAMPORTS: u64 = 1_000_000;
+
+ fn create_simple_test_bank(lamports: u64) -> (GenesisConfigInfo, Arc<Bank>) {
+ let genesis_config_info = create_genesis_config(lamports);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+ (genesis_config_info, bank)
+ }
+
+ fn make_bundle(txs: &[Transaction]) -> SanitizedBundle {
+ let transactions: Vec<_> = txs
+ .iter()
+ .map(|tx| SanitizedTransaction::try_from_legacy_transaction(tx.clone()).unwrap())
+ .collect();
+
+ let bundle_id = derive_bundle_id_from_sanitized_transactions(&transactions);
+
+ SanitizedBundle {
+ transactions,
+ bundle_id,
+ }
+ }
+
+ fn find_account_index(tx: &Transaction, account: &Pubkey) -> Option<usize> {
+ tx.message
+ .account_keys
+ .iter()
+ .position(|pubkey| account == pubkey)
+ }
+
+ /// A single, valid bundle shall execute successfully and return the correct BundleTransactionsOutput content
+ #[test]
+ fn test_single_transaction_bundle_success() {
+ const TRANSFER_AMOUNT: u64 = 1_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let lamports_per_signature = bank
+ .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash())
+ .unwrap();
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ TRANSFER_AMOUNT,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+ let default_accounts = vec![None; bundle.transactions.len()];
+
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ // make sure the bundle succeeded
+ assert!(execution_result.result.is_ok());
+
+ // check to make sure there was one batch returned with one transaction that was the same that was put in
+ assert_eq!(execution_result.bundle_transaction_results.len(), 1);
+ let tx_result = execution_result.bundle_transaction_results.get(0).unwrap();
+ assert_eq!(tx_result.transactions.len(), 1);
+ assert_eq!(tx_result.transactions[0], bundle.transactions[0]);
+
+ // make sure the transaction executed successfully
+ assert_eq!(
+ tx_result
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 1
+ );
+ let execution_result = tx_result
+ .load_and_execute_transactions_output
+ .execution_results
+ .get(0)
+ .unwrap();
+ assert!(execution_result.was_executed());
+ assert!(execution_result.was_executed_successfully());
+
+ // Make sure the post-balances are correct
+ assert_eq!(tx_result.pre_balance_info.native.len(), 1);
+ let post_tx_sol_balances = tx_result.post_balance_info.0.get(0).unwrap();
+
+ let minter_message_index =
+ find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey())
+ .unwrap();
+ let receiver_message_index = find_account_index(&transactions[0], &kp.pubkey()).unwrap();
+
+ assert_eq!(
+ post_tx_sol_balances[minter_message_index],
+ MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT
+ );
+ assert_eq!(
+ post_tx_sol_balances[receiver_message_index],
+ TRANSFER_AMOUNT
+ );
+ }
+
+ /// Test a simple failure
+ #[test]
+ fn test_single_transaction_bundle_fail() {
+ const TRANSFER_AMOUNT: u64 = 1_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ // kp has no funds, transfer will fail
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &kp,
+ &kp.pubkey(),
+ TRANSFER_AMOUNT,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ assert_eq!(execution_result.bundle_transaction_results.len(), 0);
+
+ assert!(execution_result.result.is_err());
+
+ match execution_result.result.unwrap_err() {
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_)
+ | LoadAndExecuteBundleError::LockError { .. }
+ | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => {
+ unreachable!();
+ }
+ LoadAndExecuteBundleError::TransactionError {
+ signature,
+ execution_result,
+ } => {
+ assert_eq!(signature, *bundle.transactions[0].signature());
+ assert!(!execution_result.was_executed());
+ }
+ }
+ }
+
+ /// Tests a multi-tx bundle that succeeds. Checks the returned results
+ #[test]
+ fn test_multi_transaction_bundle_success() {
+ const TRANSFER_AMOUNT_1: u64 = 100_000;
+ const TRANSFER_AMOUNT_2: u64 = 50_000;
+ const TRANSFER_AMOUNT_3: u64 = 10_000;
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let lamports_per_signature = bank
+ .get_lamports_per_signature_for_blockhash(&genesis_config_info.genesis_config.hash())
+ .unwrap();
+
+ // mint transfers 100k to 1
+ // 1 transfers 50k to 2
+ // 2 transfers 10k to 3
+ // should get executed in 3 batches [[1], [2], [3]]
+ let kp1 = Keypair::new();
+ let kp2 = Keypair::new();
+ let kp3 = Keypair::new();
+ let transactions = vec![
+ transfer(
+ &genesis_config_info.mint_keypair,
+ &kp1.pubkey(),
+ TRANSFER_AMOUNT_1,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp1,
+ &kp2.pubkey(),
+ TRANSFER_AMOUNT_2,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp2,
+ &kp3.pubkey(),
+ TRANSFER_AMOUNT_3,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ ];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ assert!(execution_result.result.is_ok());
+ assert_eq!(execution_result.bundle_transaction_results.len(), 3);
+
+ // first batch contains the first tx that was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[0].transactions,
+ bundle.transactions
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 3
+ );
+ assert!(execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[1]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .load_and_execute_transactions_output
+ .execution_results[2]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .pre_balance_info
+ .native
+ .len(),
+ 3
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0
+ .len(),
+ 3
+ );
+
+ let minter_index =
+ find_account_index(&transactions[0], &genesis_config_info.mint_keypair.pubkey())
+ .unwrap();
+ let kp1_index = find_account_index(&transactions[0], &kp1.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0[0][minter_index],
+ MINT_AMOUNT_LAMPORTS - lamports_per_signature - TRANSFER_AMOUNT_1
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[0]
+ .post_balance_info
+ .0[0][kp1_index],
+ TRANSFER_AMOUNT_1
+ );
+
+ // in the second batch, the second transaction was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .transactions
+ .to_owned(),
+ bundle.transactions[1..]
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 2
+ );
+ assert!(execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .load_and_execute_transactions_output
+ .execution_results[1]
+ .flattened_result(),
+ Err(TransactionError::AccountInUse)
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .pre_balance_info
+ .native
+ .len(),
+ 2
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0
+ .len(),
+ 2
+ );
+
+ let kp1_index = find_account_index(&transactions[1], &kp1.pubkey()).unwrap();
+ let kp2_index = find_account_index(&transactions[1], &kp2.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0[0][kp1_index],
+ TRANSFER_AMOUNT_1 - lamports_per_signature - TRANSFER_AMOUNT_2
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[1]
+ .post_balance_info
+ .0[0][kp2_index],
+ TRANSFER_AMOUNT_2
+ );
+
+ // in the third batch, the third transaction was executed
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .transactions
+ .to_owned(),
+ bundle.transactions[2..]
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .load_and_execute_transactions_output
+ .execution_results
+ .len(),
+ 1
+ );
+ assert!(execution_result.bundle_transaction_results[2]
+ .load_and_execute_transactions_output
+ .execution_results[0]
+ .was_executed_successfully());
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .pre_balance_info
+ .native
+ .len(),
+ 1
+ );
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0
+ .len(),
+ 1
+ );
+
+ let kp2_index = find_account_index(&transactions[2], &kp2.pubkey()).unwrap();
+ let kp3_index = find_account_index(&transactions[2], &kp3.pubkey()).unwrap();
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0[0][kp2_index],
+ TRANSFER_AMOUNT_2 - lamports_per_signature - TRANSFER_AMOUNT_3
+ );
+
+ assert_eq!(
+ execution_result.bundle_transaction_results[2]
+ .post_balance_info
+ .0[0][kp3_index],
+ TRANSFER_AMOUNT_3
+ );
+ }
+
+ /// Tests a multi-tx bundle with the middle transaction failing.
+ #[test]
+ fn test_multi_transaction_bundle_fails() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp1 = Keypair::new();
+ let kp2 = Keypair::new();
+ let kp3 = Keypair::new();
+ let transactions = vec![
+ transfer(
+ &genesis_config_info.mint_keypair,
+ &kp1.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp2,
+ &kp3.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ transfer(
+ &kp1,
+ &kp2.pubkey(),
+ 100_000,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ ];
+ let bundle = make_bundle(&transactions);
+
+ let default_accounts = vec![None; bundle.transactions.len()];
+ let execution_result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &MAX_PROCESSING_TIME,
+ true,
+ true,
+ true,
+ true,
+ &LOG_MESSAGE_BYTES_LIMITS,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+ match execution_result.result.as_ref().unwrap_err() {
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_)
+ | LoadAndExecuteBundleError::LockError { .. }
+ | LoadAndExecuteBundleError::InvalidPreOrPostAccounts => {
+ unreachable!();
+ }
+
+ LoadAndExecuteBundleError::TransactionError {
+ signature,
+ execution_result: tx_failure,
+ } => {
+ assert_eq!(signature, bundle.transactions[1].signature());
+ assert_eq!(
+ tx_failure.flattened_result(),
+ Err(TransactionError::AccountNotFound)
+ );
+ assert_eq!(execution_result.bundle_transaction_results().len(), 0);
+ }
+ }
+ }
+
+ /// Tests that when the max processing time is exceeded, the bundle is an error
+ #[test]
+ fn test_bundle_max_processing_time_exceeded() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ // locks the accounts and prevents execution because the write lock on genesis_config_info.mint_keypair + kp.pubkey() is held
+ let _batch = bank.prepare_sanitized_batch(&locked_transfer);
+
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &default,
+ &default,
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(_))
+ );
+ }
+
+ #[test]
+ fn test_simulate_bundle_with_locked_account_works() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ let _batch = bank.prepare_sanitized_batch(&locked_transfer);
+
+ // simulation ignores account locks so you can simulate bundles on unfrozen banks
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ true,
+ None,
+ &default,
+ &default,
+ );
+ assert!(result.result.is_ok());
+ }
+
+ /// Creates a bundle and temporarily locks the accounts for one of its transactions from a
+ /// background thread. Ensures bundle execution spins until the locks are released, then succeeds.
+ #[test]
+ fn test_bundle_works_with_released_account_locks() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+ let barrier = Arc::new(Barrier::new(2));
+
+ let kp = Keypair::new();
+
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = Arc::new(make_bundle(&transactions));
+
+ let locked_transfer = vec![SanitizedTransaction::from_transaction_for_tests(transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 2,
+ genesis_config_info.genesis_config.hash(),
+ ))];
+
+ // background thread locks the accounts for a bit then unlocks them
+ let thread = {
+ let barrier = barrier.clone();
+ let bank = bank.clone();
+ spawn(move || {
+ let batch = bank.prepare_sanitized_batch(&locked_transfer);
+ barrier.wait();
+ sleep(Duration::from_millis(500));
+ drop(batch);
+ })
+ };
+
+ let _ = barrier.wait();
+
+ // load_and_execute_bundle should spin for a bit then process after the 500ms sleep is over
+ let default = vec![None; bundle.transactions.len()];
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_secs(2),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &default,
+ &default,
+ );
+ assert!(result.result.is_ok());
+
+ thread.join().unwrap();
+ }
+
+ /// Tests that a bundle errors with InvalidPreOrPostAccounts when the pre- or post-execution
+ /// account lists don't contain one entry per transaction
+ #[test]
+ fn test_bundle_bad_pre_post_accounts() {
+ let (genesis_config_info, bank) = create_simple_test_bank(MINT_AMOUNT_LAMPORTS);
+
+ let kp = Keypair::new();
+ let transactions = vec![transfer(
+ &genesis_config_info.mint_keypair,
+ &kp.pubkey(),
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ )];
+ let bundle = make_bundle(&transactions);
+
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &vec![None; 2],
+ &vec![None; bundle.transactions.len()],
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts)
+ );
+
+ let result = load_and_execute_bundle(
+ &bank,
+ &bundle,
+ MAX_PROCESSING_AGE,
+ &Duration::from_millis(100),
+ false,
+ false,
+ false,
+ false,
+ &None,
+ false,
+ None,
+ &vec![None; bundle.transactions.len()],
+ &vec![None; 2],
+ );
+ assert_matches!(
+ result.result,
+ Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts)
+ );
+ }
+}
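The tests above all drive load_and_execute_bundle and branch on LoadAndExecuteBundleError. A minimal sketch of that caller-side pattern, using simplified stand-in types rather than the real solana-bundle definitions (the actual enum also carries LockError and richer payloads):

    use std::time::Duration;

    // Stand-in for solana_bundle::bundle_execution::LoadAndExecuteBundleError.
    #[derive(Debug)]
    enum LoadAndExecuteBundleError {
        ProcessingTimeExceeded(Duration),
        InvalidPreOrPostAccounts,
        TransactionError { signature: String },
    }

    fn handle_bundle_result(result: Result<(), LoadAndExecuteBundleError>) {
        match result {
            // every transaction executed; the bundle can be committed atomically
            Ok(()) => println!("bundle ok"),
            // timeout: nothing was committed, so retrying on a later slot is safe
            Err(LoadAndExecuteBundleError::ProcessingTimeExceeded(d)) => {
                println!("timed out after {d:?}")
            }
            // a single failing transaction poisons the whole bundle
            Err(LoadAndExecuteBundleError::TransactionError { signature }) => {
                println!("bundle dropped, offending tx: {signature}")
            }
            // pre/post account vectors must hold one entry per transaction
            Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts) => {
                println!("bad pre/post account shape")
            }
        }
    }

    fn main() {
        handle_bundle_result(Err(LoadAndExecuteBundleError::InvalidPreOrPostAccounts));
    }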
diff --git a/bundle/src/lib.rs b/bundle/src/lib.rs
new file mode 100644
index 00000000000000..a93e0d3d178a86
--- /dev/null
+++ b/bundle/src/lib.rs
@@ -0,0 +1,60 @@
+use {
+ crate::bundle_execution::LoadAndExecuteBundleError,
+ anchor_lang::error::Error,
+ serde::{Deserialize, Serialize},
+ solana_poh::poh_recorder::PohRecorderError,
+ solana_sdk::pubkey::Pubkey,
+ thiserror::Error,
+};
+
+pub mod bundle_execution;
+
+#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub enum TipError {
+ #[error("account is missing from bank: {0}")]
+ AccountMissing(Pubkey),
+
+ #[error("Anchor error: {0}")]
+ AnchorError(String),
+
+ #[error("Lock error")]
+ LockError,
+
+ #[error("Error executing initialize programs")]
+ InitializeProgramsError,
+
+ #[error("Error cranking tip programs")]
+ CrankTipError,
+}
+
+impl From<Error> for TipError {
+ fn from(anchor_err: Error) -> Self {
+ match anchor_err {
+ Error::AnchorError(e) => Self::AnchorError(e.error_msg),
+ Error::ProgramError(e) => Self::AnchorError(e.to_string()),
+ }
+ }
+}
+
+pub type BundleExecutionResult<T> = Result<T, BundleExecutionError>;
+
+#[derive(Error, Debug, Clone)]
+pub enum BundleExecutionError {
+ #[error("The bank has hit the max allotted time for processing transactions")]
+ BankProcessingTimeLimitReached,
+
+ #[error("The bundle exceeds the cost model")]
+ ExceedsCostModel,
+
+ #[error("Runtime error while executing the bundle: {0}")]
+ TransactionFailure(#[from] LoadAndExecuteBundleError),
+
+ #[error("Error locking bundle because a transaction is malformed")]
+ LockError,
+
+ #[error("PoH record error: {0}")]
+ PohRecordError(#[from] PohRecorderError),
+
+ #[error("Tip payment error {0}")]
+ TipError(#[from] TipError),
+}
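For orientation, the From<Error> conversion above is what lets tip-related code call anchor-generated helpers and bubble failures up with the ? operator. A small self-contained sketch of that flow, with stand-in types in place of the real anchor-lang and solana-bundle APIs:

    #[derive(Debug)]
    enum AnchorError {
        Custom(String), // stand-in; anchor_lang::error::Error is richer
    }

    #[derive(Debug)]
    enum TipError {
        AnchorError(String),
    }

    impl From<AnchorError> for TipError {
        fn from(e: AnchorError) -> Self {
            match e {
                AnchorError::Custom(msg) => TipError::AnchorError(msg),
            }
        }
    }

    fn crank_tip_programs() -> Result<(), TipError> {
        // a failing anchor-style call; `?` applies the From conversion above
        let anchor_call = || -> Result<(), AnchorError> {
            Err(AnchorError::Custom("tip account not initialized".into()))
        };
        anchor_call()?;
        Ok(())
    }

    fn main() {
        println!("{:?}", crank_tip_programs());
    }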
diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh
index 113b009aa4452e..98c35ba1575b07 100755
--- a/ci/buildkite-pipeline-in-disk.sh
+++ b/ci/buildkite-pipeline-in-disk.sh
@@ -289,7 +289,7 @@ if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"
annotate --style info --context release-tag \
- "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
+ "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
@@ -307,7 +307,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
- "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
+ "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH"
if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh
index 8535905bfee4d0..c3aaffd9f86095 100755
--- a/ci/buildkite-pipeline.sh
+++ b/ci/buildkite-pipeline.sh
@@ -313,7 +313,7 @@ if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"
annotate --style info --context release-tag \
- "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
+ "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG"
# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
@@ -331,7 +331,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
- "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
+ "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH"
if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml
index c8bf7b4fd9fd57..48aa4d95f45b63 100644
--- a/ci/buildkite-secondary.yml
+++ b/ci/buildkite-secondary.yml
@@ -18,34 +18,34 @@ steps:
agents:
queue: "release-build"
timeout_in_minutes: 5
- - wait
- - name: "publish docker"
- command: "sdk/docker-solana/build.sh"
- agents:
- queue: "release-build"
- timeout_in_minutes: 60
- - name: "publish crate"
- command: "ci/publish-crate.sh"
- agents:
- queue: "release-build"
- retry:
- manual:
- permit_on_passed: true
- timeout_in_minutes: 240
- branches: "!master"
- - name: "publish tarball (aarch64-apple-darwin)"
- command: "ci/publish-tarball.sh"
- agents:
- queue: "release-build-aarch64-apple-darwin"
- retry:
- manual:
- permit_on_passed: true
- timeout_in_minutes: 60
- - name: "publish tarball (x86_64-apple-darwin)"
- command: "ci/publish-tarball.sh"
- agents:
- queue: "release-build-x86_64-apple-darwin"
- retry:
- manual:
- permit_on_passed: true
- timeout_in_minutes: 60
+# - wait
+# - name: "publish docker"
+# command: "sdk/docker-solana/build.sh"
+# agents:
+# queue: "release-build"
+# timeout_in_minutes: 60
+# - name: "publish crate"
+# command: "ci/publish-crate.sh"
+# agents:
+# queue: "release-build"
+# retry:
+# manual:
+# permit_on_passed: true
+# timeout_in_minutes: 240
+# branches: "!master"
+# - name: "publish tarball (aarch64-apple-darwin)"
+# command: "ci/publish-tarball.sh"
+# agents:
+# queue: "release-build-aarch64-apple-darwin"
+# retry:
+# manual:
+# permit_on_passed: true
+# timeout_in_minutes: 60
+# - name: "publish tarball (x86_64-apple-darwin)"
+# command: "ci/publish-tarball.sh"
+# agents:
+# queue: "release-build-x86_64-apple-darwin"
+# retry:
+# manual:
+# permit_on_passed: true
+# timeout_in_minutes: 60
diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh
index ede70e6229d5f8..d409667764462c 100755
--- a/ci/buildkite-solana-private.sh
+++ b/ci/buildkite-solana-private.sh
@@ -269,7 +269,7 @@ pull_or_push_steps() {
# start_pipeline "Tag pipeline for $BUILDKITE_TAG"
# annotate --style info --context release-tag \
-# "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
+# "https://github.com/jito-foundation/jito-solana/releases/$BUILDKITE_TAG"
# # Jump directly to the secondary build to publish release artifacts quickly
# trigger_secondary_step
@@ -287,7 +287,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
- "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
+ "Github Pull Request: https://github.com/jito-foundation/jito-solana/$BUILDKITE_BRANCH"
if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
diff --git a/ci/channel-info.sh b/ci/channel-info.sh
index c82806454d012c..101583307f5577 100755
--- a/ci/channel-info.sh
+++ b/ci/channel-info.sh
@@ -11,7 +11,7 @@ here="$(dirname "$0")"
# shellcheck source=ci/semver_bash/semver.sh
source "$here"/semver_bash/semver.sh
-remote=https://github.com/solana-labs/solana.git
+remote=https://github.com/jito-foundation/jito-solana.git
# Fetch all vX.Y.Z tags
#
diff --git a/ci/check-crates.sh b/ci/check-crates.sh
index 655504ea11d8e3..d6a9ad9c397db9 100755
--- a/ci/check-crates.sh
+++ b/ci/check-crates.sh
@@ -31,6 +31,9 @@ printf "%s\n" "${files[@]}"
error_count=0
for file in "${files[@]}"; do
read -r crate_name package_publish workspace < <(toml get "$file" . | jq -r '(.package.name | tostring)+" "+(.package.publish | tostring)+" "+(.workspace | tostring)')
+ if [ "$crate_name" == "solana-bundle" ]; then
+ continue
+ fi
echo "=== $crate_name ($file) ==="
if [[ $package_publish = 'false' ]]; then
diff --git a/ci/publish-installer.sh b/ci/publish-installer.sh
index 4b5345ae0d26fe..71d8ef6985e4aa 100755
--- a/ci/publish-installer.sh
+++ b/ci/publish-installer.sh
@@ -26,14 +26,14 @@ fi
# upload install script
source ci/upload-ci-artifact.sh
-cat >release.solana.com-install <<EOF
+cat >release.jito.wtf-install <<EOF
-cat install/solana-install-init.sh >>release.solana.com-install
+cat install/solana-install-init.sh >>release.jito.wtf-install
echo --- AWS S3 Store: "install"
-upload-s3-artifact "/solana/release.solana.com-install" "s3://release.solana.com/$CHANNEL_OR_TAG/install"
+upload-s3-artifact "/solana/release.jito.wtf-install" "s3://release.jito.wtf/$CHANNEL_OR_TAG/install"
echo Published to:
-ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
+ci/format-url.sh https://release.jito.wtf/"$CHANNEL_OR_TAG"/install
diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh
index ff72bb7da2d066..ea132a73e1ebf4 100755
--- a/ci/publish-tarball.sh
+++ b/ci/publish-tarball.sh
@@ -119,16 +119,16 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
if [[ -n $BUILDKITE ]]; then
echo --- AWS S3 Store: "$file"
- upload-s3-artifact "/solana/$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+ upload-s3-artifact "/solana/$file" s3://release.jito.wtf/"$CHANNEL_OR_TAG"/"$file"
echo Published to:
- $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
+ $DRYRUN ci/format-url.sh https://release.jito.wtf/"$CHANNEL_OR_TAG"/"$file"
if [[ -n $TAG ]]; then
ci/upload-github-release-asset.sh "$file"
fi
elif [[ -n $TRAVIS ]]; then
- # .travis.yml uploads everything in the travis-s3-upload/ directory to release.solana.com
+ # .travis.yml uploads everything in the travis-s3-upload/ directory to release.jito.wtf
mkdir -p travis-s3-upload/"$CHANNEL_OR_TAG"
cp -v "$file" travis-s3-upload/"$CHANNEL_OR_TAG"/
diff --git a/ci/test-coverage.sh b/ci/test-coverage.sh
index 44231cd338a13e..60e57c6331d0de 100755
--- a/ci/test-coverage.sh
+++ b/ci/test-coverage.sh
@@ -32,5 +32,5 @@ else
codecov -t "${CODECOV_TOKEN}"
annotate --style success --context codecov.io \
- "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"
+ "CodeCov report: https://codecov.io/github/jito-foundation/jito-solana/commit/${CI_COMMIT:0:9}"
fi
diff --git a/ci/upload-github-release-asset.sh b/ci/upload-github-release-asset.sh
index ca2ae2a8f60443..fb4de1af9e940d 100755
--- a/ci/upload-github-release-asset.sh
+++ b/ci/upload-github-release-asset.sh
@@ -26,7 +26,7 @@ fi
# Force CI_REPO_SLUG since sometimes
# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
# artifact upload to fail
-CI_REPO_SLUG=solana-labs/solana
+CI_REPO_SLUG=jito-foundation/jito-solana
#if [[ -z $CI_REPO_SLUG ]]; then
# echo Error: CI_REPO_SLUG not defined
# exit 1
diff --git a/core/Cargo.toml b/core/Cargo.toml
index fcab8ff8775912..6f3e2402ec3b19 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -14,6 +14,7 @@ edition = { workspace = true }
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[dependencies]
+anchor-lang = { workspace = true }
base64 = { workspace = true }
bincode = { workspace = true }
bs58 = { workspace = true }
@@ -26,11 +27,16 @@ etcd-client = { workspace = true, features = ["tls"] }
futures = { workspace = true }
histogram = { workspace = true }
itertools = { workspace = true }
+jito-protos = { workspace = true }
+jito-tip-distribution = { workspace = true }
+jito-tip-payment = { workspace = true }
lazy_static = { workspace = true }
log = { workspace = true }
lru = { workspace = true }
min-max-heap = { workspace = true }
num_enum = { workspace = true }
+prost = { workspace = true }
+prost-types = { workspace = true }
quinn = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
@@ -43,6 +49,7 @@ serde_bytes = { workspace = true }
serde_derive = { workspace = true }
solana-accounts-db = { workspace = true }
solana-bloom = { workspace = true }
+solana-bundle = { workspace = true }
solana-client = { workspace = true }
solana-cost-model = { workspace = true }
solana-entry = { workspace = true }
@@ -62,6 +69,7 @@ solana-rayon-threadlimit = { workspace = true }
solana-rpc = { workspace = true }
solana-rpc-client-api = { workspace = true }
solana-runtime = { workspace = true }
+solana-runtime-plugin = { workspace = true }
solana-sdk = { workspace = true }
solana-send-transaction-service = { workspace = true }
solana-streamer = { workspace = true }
@@ -77,6 +85,7 @@ sys-info = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
+tonic = { workspace = true }
trees = { workspace = true }
[dev-dependencies]
@@ -85,10 +94,13 @@ fs_extra = { workspace = true }
raptorq = { workspace = true }
serde_json = { workspace = true }
serial_test = { workspace = true }
+solana-accounts-db = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
+solana-bundle = { workspace = true }
solana-core = { path = ".", features = ["dev-context-only-utils"] }
solana-logger = { workspace = true }
solana-program-runtime = { workspace = true }
+solana-program-test = { workspace = true }
solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
solana-sdk = { workspace = true, features = ["dev-context-only-utils"] }
solana-stake-program = { workspace = true }
@@ -101,6 +113,7 @@ sysctl = { workspace = true }
[build-dependencies]
rustc_version = { workspace = true }
+tonic-build = { workspace = true }
[features]
dev-context-only-utils = []
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs
index b399abba0a9e5b..4b27e523e2f467 100644
--- a/core/benches/banking_stage.rs
+++ b/core/benches/banking_stage.rs
@@ -22,6 +22,7 @@ use {
BankingStage, BankingStageStats,
},
banking_trace::{BankingPacketBatch, BankingTracer},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
},
solana_entry::entry::{next_hash, Entry},
solana_gossip::cluster_info::{ClusterInfo, Node},
@@ -54,6 +55,7 @@ use {
vote_state::VoteStateUpdate, vote_transaction::new_vote_state_update_transaction,
},
std::{
+ collections::HashSet,
iter::repeat_with,
sync::{atomic::Ordering, Arc},
time::{Duration, Instant},
@@ -65,8 +67,15 @@ fn check_txs(receiver: &Arc<Receiver<WorkingBankEntry>>, ref_tx_count: usize) {
let mut total = 0;
let now = Instant::now();
loop {
- if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::new(1, 0)) {
- total += entry.transactions.len();
+ if let Ok(WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }) = receiver.recv_timeout(Duration::new(1, 0))
+ {
+ total += entries_ticks
+ .iter()
+ .map(|e| e.0.transactions.len())
+ .sum::<usize>();
}
if total >= ref_tx_count {
break;
@@ -109,7 +118,14 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
);
let (s, _r) = unbounded();
let committer = Committer::new(None, s, Arc::new(PrioritizationFeeCache::new(0u64)));
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// This tests the performance of buffering packets.
// If the packet buffers are copied, performance will be poor.
bencher.iter(move || {
@@ -305,6 +321,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
let chunk_len = verified.len() / CHUNKS;
diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs
index 928758deb7f55a..0781f9bd3f39d8 100644
--- a/core/benches/consumer.rs
+++ b/core/benches/consumer.rs
@@ -7,16 +7,16 @@ use {
iter::IndexedParallelIterator,
prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
},
- solana_core::banking_stage::{
- committer::Committer, consumer::Consumer, qos_service::QosService,
+ solana_core::{
+ banking_stage::{committer::Committer, consumer::Consumer, qos_service::QosService},
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
},
- solana_entry::entry::Entry,
solana_ledger::{
blockstore::Blockstore,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
},
solana_poh::{
- poh_recorder::{create_test_recorder, PohRecorder},
+ poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
poh_service::PohService,
},
solana_runtime::bank::Bank,
@@ -25,9 +25,12 @@ use {
signer::Signer, stake_history::Epoch, system_program, system_transaction,
transaction::SanitizedTransaction,
},
- std::sync::{
- atomic::{AtomicBool, Ordering},
- Arc, RwLock,
+ std::{
+ collections::HashSet,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, RwLock,
+ },
},
tempfile::TempDir,
test::Bencher,
@@ -80,7 +83,14 @@ fn create_consumer(poh_recorder: &RwLock<PohRecorder>) -> Consumer {
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let committer = Committer::new(None, replay_vote_sender, Arc::default());
let transaction_recorder = poh_recorder.read().unwrap().new_recorder();
- Consumer::new(committer, transaction_recorder, QosService::new(0), None)
+ Consumer::new(
+ committer,
+ transaction_recorder,
+ QosService::new(0),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ )
}
struct BenchFrame {
@@ -89,7 +99,7 @@ struct BenchFrame {
exit: Arc<AtomicBool>,
poh_recorder: Arc<RwLock<PohRecorder>>,
poh_service: PohService,
- signal_receiver: Receiver<(Arc<Bank>, (Entry, u64))>,
+ signal_receiver: Receiver<WorkingBankEntry>,
}
fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame {
diff --git a/core/benches/proto_to_packet.rs b/core/benches/proto_to_packet.rs
new file mode 100644
index 00000000000000..87f85f9c7fef94
--- /dev/null
+++ b/core/benches/proto_to_packet.rs
@@ -0,0 +1,56 @@
+#![feature(test)]
+
+extern crate test;
+
+use {
+ jito_protos::proto::packet::{
+ Meta as PbMeta, Packet as PbPacket, PacketBatch, PacketFlags as PbFlags,
+ },
+ solana_core::proto_packet_to_packet,
+ solana_sdk::packet::{Packet, PACKET_DATA_SIZE},
+ std::iter::repeat,
+ test::{black_box, Bencher},
+};
+
+fn get_proto_packet(i: u8) -> PbPacket {
+ PbPacket {
+ data: repeat(i).take(PACKET_DATA_SIZE).collect(),
+ meta: Some(PbMeta {
+ size: PACKET_DATA_SIZE as u64,
+ addr: "255.255.255.255:65535".to_string(),
+ port: 65535,
+ flags: Some(PbFlags {
+ discard: false,
+ forwarded: false,
+ repair: false,
+ simple_vote_tx: false,
+ tracer_packet: false,
+ }),
+ sender_stake: 0,
+ }),
+ }
+}
+
+#[bench]
+fn bench_proto_to_packet(bencher: &mut Bencher) {
+ bencher.iter(|| {
+ black_box(proto_packet_to_packet(get_proto_packet(1)));
+ });
+}
+
+#[bench]
+fn bench_batch_list_to_packets(bencher: &mut Bencher) {
+ let packet_batch = PacketBatch {
+ packets: (0..128).map(get_proto_packet).collect(),
+ };
+
+ bencher.iter(|| {
+ black_box(
+ packet_batch
+ .packets
+ .iter()
+ .map(|p| proto_packet_to_packet(p.clone()))
+ .collect::<Vec<Packet>>(),
+ );
+ });
+}
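The benches above assume solana_core::proto_packet_to_packet copies the protobuf payload and metadata into solana's fixed-size Packet buffer. A rough, hypothetical sketch of that shape with simplified stand-in types (the real Meta also carries addr, port, and flags):

    const PACKET_DATA_SIZE: usize = 1232; // solana's fixed packet buffer size

    struct PbPacket {
        data: Vec<u8>,
        size: usize,
    }

    struct Packet {
        buffer: [u8; PACKET_DATA_SIZE],
        size: usize,
    }

    fn proto_packet_to_packet(p: PbPacket) -> Packet {
        let mut out = Packet { buffer: [0u8; PACKET_DATA_SIZE], size: 0 };
        // clamp to both the declared size and the bytes actually present
        let len = p.size.min(PACKET_DATA_SIZE).min(p.data.len());
        out.buffer[..len].copy_from_slice(&p.data[..len]);
        out.size = len;
        out
    }

    fn main() {
        let packet = proto_packet_to_packet(PbPacket { data: vec![7u8; 64], size: 64 });
        assert_eq!(packet.size, 64);
    }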
diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs
index 110e1f5aa42b66..7373ffd5b3a127 100644
--- a/core/src/admin_rpc_post_init.rs
+++ b/core/src/admin_rpc_post_init.rs
@@ -1,10 +1,12 @@
use {
+ crate::proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig},
solana_gossip::cluster_info::ClusterInfo,
solana_runtime::bank_forks::BankForks,
solana_sdk::pubkey::Pubkey,
std::{
collections::HashSet,
- sync::{Arc, RwLock},
+ net::SocketAddr,
+ sync::{Arc, Mutex, RwLock},
},
};
@@ -14,4 +16,7 @@ pub struct AdminRpcRequestMetadataPostInit {
pub bank_forks: Arc<RwLock<BankForks>>,
pub vote_account: Pubkey,
pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
+ pub block_engine_config: Arc<Mutex<BlockEngineConfig>>,
+ pub relayer_config: Arc<Mutex<RelayerConfig>>,
+ pub shred_receiver_address: Arc<Mutex<Option<SocketAddr>>>,
}
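The three new fields are Arc<Mutex<...>> so the admin RPC thread can swap the block engine or relayer endpoint at runtime while the proxy stages keep clones of the same Arc. A minimal sketch of that handoff, assuming a stand-in config type:

    use std::sync::{Arc, Mutex};

    #[derive(Clone, Default, Debug)]
    struct BlockEngineConfig {
        url: String, // stand-in; the real config carries more fields
    }

    fn main() {
        let shared = Arc::new(Mutex::new(BlockEngineConfig::default()));

        // admin RPC side: replace the config in place
        let admin_handle = Arc::clone(&shared);
        *admin_handle.lock().unwrap() = BlockEngineConfig {
            url: "https://example.invalid".into(),
        };

        // proxy stage side: picks up the new value on its next reconnect
        let current = shared.lock().unwrap().clone();
        println!("connecting to {}", current.url);
    }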
diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index 972e606276a3aa..b4c43c5aca3702 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -16,8 +16,9 @@ use {
unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage},
},
crate::{
- banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats,
- validator::BlockProductionMethod,
+ banking_trace::BankingPacketReceiver,
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
+ tracer_packet_stats::TracerPacketStats, validator::BlockProductionMethod,
},
crossbeam_channel::RecvTimeoutError,
histogram::Histogram,
@@ -28,10 +29,12 @@ use {
solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH},
solana_poh::poh_recorder::PohRecorder,
solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache},
- solana_sdk::timing::AtomicInterval,
+ solana_sdk::{pubkey::Pubkey, timing::AtomicInterval},
solana_vote::vote_sender_types::ReplayVoteSender,
std::{
- cmp, env,
+ cmp,
+ collections::HashSet,
+ env,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc, RwLock,
@@ -50,13 +53,13 @@ pub mod unprocessed_packet_batches;
pub mod unprocessed_transaction_storage;
mod consume_worker;
-mod decision_maker;
+pub(crate) mod decision_maker;
mod forward_packet_batches_by_accounts;
mod forward_worker;
mod forwarder;
-mod immutable_deserialized_packet;
+pub(crate) mod immutable_deserialized_packet;
mod latest_unprocessed_votes;
-mod leader_slot_timing_metrics;
+pub(crate) mod leader_slot_timing_metrics;
mod multi_iterator_scanner;
mod packet_deserializer;
mod packet_filter;
@@ -328,6 +331,8 @@ impl BankingStage {
connection_cache: Arc<ConnectionCache>,
bank_forks: Arc<RwLock<BankForks>>,
prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
Self::new_num_threads(
block_production_method,
@@ -343,6 +348,8 @@ impl BankingStage {
connection_cache,
bank_forks,
prioritization_fee_cache,
+ blacklisted_accounts,
+ bundle_account_locker,
)
}
@@ -361,6 +368,8 @@ impl BankingStage {
connection_cache: Arc<ConnectionCache>,
bank_forks: Arc<RwLock<BankForks>>,
prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
match block_production_method {
BlockProductionMethod::ThreadLocalMultiIterator => {
@@ -377,6 +386,8 @@ impl BankingStage {
connection_cache,
bank_forks,
prioritization_fee_cache,
+ blacklisted_accounts,
+ bundle_account_locker,
)
}
}
@@ -396,6 +407,8 @@ impl BankingStage {
connection_cache: Arc<ConnectionCache>,
bank_forks: Arc<RwLock<BankForks>>,
prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
assert!(num_threads >= MIN_TOTAL_THREADS);
// Single thread to generate entries from many banks.
@@ -455,6 +468,8 @@ impl BankingStage {
poh_recorder.read().unwrap().new_recorder(),
QosService::new(id),
log_messages_bytes_limit,
+ blacklisted_accounts.clone(),
+ bundle_account_locker.clone(),
);
Builder::new()
@@ -615,7 +630,7 @@ mod tests {
crate::banking_trace::{BankingPacketBatch, BankingTracer},
crossbeam_channel::{unbounded, Receiver},
itertools::Itertools,
- solana_entry::entry::{Entry, EntrySlice},
+ solana_entry::entry::EntrySlice,
solana_gossip::cluster_info::Node,
solana_ledger::{
blockstore::Blockstore,
@@ -629,6 +644,7 @@ mod tests {
solana_poh::{
poh_recorder::{
create_test_recorder, PohRecorderError, Record, RecordTransactionsSummary,
+ WorkingBankEntry,
},
poh_service::PohService,
},
@@ -703,6 +719,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
drop(non_vote_sender);
drop(tpu_vote_sender);
@@ -760,6 +778,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
trace!("sending bank");
drop(non_vote_sender);
@@ -772,7 +792,12 @@ mod tests {
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
@@ -842,6 +867,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// fund another account so we can send 2 good transactions in a single batch.
@@ -893,9 +920,14 @@ mod tests {
bank.process_transaction(&fund_tx).unwrap();
//receive entries + ticks
loop {
- let entries: Vec<Entry> = entry_receiver
+ let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
assert!(entries.verify(&blockhash));
@@ -1004,6 +1036,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
// wait for banking_stage to eat the packets
@@ -1022,7 +1056,12 @@ mod tests {
// check that the balance is what we expect.
let entries: Vec<_> = entry_receiver
.iter()
- .map(|(_bank, (entry, _tick_height))| entry)
+ .flat_map(
+ |WorkingBankEntry {
+ bank: _,
+ entries_ticks,
+ }| entries_ticks.into_iter().map(|(e, _)| e),
+ )
.collect();
let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
@@ -1083,15 +1122,19 @@ mod tests {
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()).into(),
];
- let _ = recorder.record_transactions(bank.slot(), txs.clone());
- let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
+ let _ = recorder.record_transactions(bank.slot(), vec![txs.clone()]);
+ let WorkingBankEntry {
+ bank,
+ entries_ticks,
+ } = entry_receiver.recv().unwrap();
+ let entry = &entries_ticks.get(0).unwrap().0;
assert_eq!(entry.transactions, txs);
// Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
// record_transactions should throw MaxHeightReached
let next_slot = bank.slot() + 1;
let RecordTransactionsSummary { result, .. } =
- recorder.record_transactions(next_slot, txs);
+ recorder.record_transactions(next_slot, vec![txs]);
assert_matches!(result, Err(PohRecorderError::MaxHeightReached));
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
@@ -1195,6 +1238,8 @@ mod tests {
Arc::new(ConnectionCache::new("connection_cache_test")),
bank_forks,
&Arc::new(PrioritizationFeeCache::new(0u64)),
+ HashSet::default(),
+ BundleAccountLocker::default(),
);
let keypairs = (0..100).map(|_| Keypair::new()).collect_vec();
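The repeated flat_map destructuring in these tests reflects WorkingBankEntry changing from a (bank, (entry, tick_height)) tuple into a struct that batches several entries per send (record_transactions now takes a Vec of transaction batches for the same reason). A simplified model of that shape and the collection pattern, with Bank and Entry as stand-ins for the real types:

    use std::sync::Arc;

    struct Bank;

    #[derive(Clone)]
    struct Entry {
        transactions: Vec<u64>, // stand-in payload
    }

    struct WorkingBankEntry {
        bank: Arc<Bank>,
        entries_ticks: Vec<(Entry, u64)>, // (entry, tick_height) pairs
    }

    fn collect_entries(received: Vec<WorkingBankEntry>) -> Vec<Entry> {
        received
            .into_iter()
            .flat_map(|WorkingBankEntry { bank: _, entries_ticks }| {
                entries_ticks.into_iter().map(|(entry, _tick_height)| entry)
            })
            .collect()
    }

    fn main() {
        let wbe = WorkingBankEntry {
            bank: Arc::new(Bank),
            entries_ticks: vec![(Entry { transactions: vec![1, 2] }, 5)],
        };
        assert_eq!(collect_entries(vec![wbe]).len(), 1);
    }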
diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs
index 88b129aae3d203..fcd27e6e1a9c54 100644
--- a/core/src/banking_stage/committer.rs
+++ b/core/src/banking_stage/committer.rs
@@ -15,12 +15,10 @@ use {
prioritization_fee_cache::PrioritizationFeeCache,
transaction_batch::TransactionBatch,
},
- solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign},
- solana_transaction_status::{
- token_balances::TransactionTokenBalancesSet, TransactionTokenBalance,
- },
+ solana_sdk::{hash::Hash, saturating_add_assign},
+ solana_transaction_status::{token_balances::TransactionTokenBalancesSet, PreBalanceInfo},
solana_vote::vote_sender_types::ReplayVoteSender,
- std::{collections::HashMap, sync::Arc},
+ std::sync::Arc,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -29,13 +27,6 @@ pub enum CommitTransactionDetails {
NotCommitted,
}
-#[derive(Default)]
-pub(super) struct PreBalanceInfo {
- pub native: Vec>,
- pub token: Vec>,
- pub mint_decimals: HashMap,
-}
-
pub struct Committer {
transaction_status_sender: Option,
replay_vote_sender: ReplayVoteSender,
@@ -143,7 +134,7 @@ impl Committer {
let txs = batch.sanitized_transactions().to_vec();
let post_balances = bank.collect_balances(batch);
let post_token_balances =
- collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals);
+ collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None);
let mut transaction_index = starting_transaction_index.unwrap_or_default();
let batch_transaction_indexes: Vec<_> = tx_results
.execution_results
diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs
index d2451efa1c4c66..3abdee1585d96b 100644
--- a/core/src/banking_stage/consume_worker.rs
+++ b/core/src/banking_stage/consume_worker.rs
@@ -126,11 +126,14 @@ fn try_drain_iter<T>(work: T, receiver: &Receiver<T>) -> impl Iterator<Item = T> + '_ {
mod tests {
use {
super::*,
- crate::banking_stage::{
- committer::Committer,
- qos_service::QosService,
- scheduler_messages::{TransactionBatchId, TransactionId},
- tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh},
+ crate::{
+ banking_stage::{
+ committer::Committer,
+ qos_service::QosService,
+ scheduler_messages::{TransactionBatchId, TransactionId},
+ tests::{create_slow_genesis_config, sanitize_transactions, simulate_poh},
+ },
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
},
crossbeam_channel::unbounded,
solana_ledger::{
@@ -145,6 +148,7 @@ mod tests {
},
solana_vote::vote_sender_types::ReplayVoteReceiver,
std::{
+ collections::HashSet,
sync::{atomic::AtomicBool, RwLock},
thread::JoinHandle,
},
@@ -202,7 +206,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let (consume_sender, consume_receiver) = unbounded();
let (consumed_sender, consumed_receiver) = unbounded();
diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs
index c8624a96aad7d4..134d7096a536ec 100644
--- a/core/src/banking_stage/consumer.rs
+++ b/core/src/banking_stage/consumer.rs
@@ -1,6 +1,6 @@
use {
super::{
- committer::{CommitTransactionDetails, Committer, PreBalanceInfo},
+ committer::{CommitTransactionDetails, Committer},
immutable_deserialized_packet::ImmutableDeserializedPacket,
leader_slot_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary},
leader_slot_timing_metrics::LeaderExecuteAndCommitTimings,
@@ -8,6 +8,7 @@ use {
unprocessed_transaction_storage::{ConsumeScannerPayload, UnprocessedTransactionStorage},
BankingStageStats,
},
+ crate::bundle_stage::bundle_account_locker::BundleAccountLocker,
itertools::Itertools,
solana_accounts_db::{
transaction_error_metrics::TransactionErrorMetrics,
@@ -19,18 +20,21 @@ use {
BankStart, PohRecorderError, RecordTransactionsSummary, RecordTransactionsTimings,
TransactionRecorder,
},
- solana_program_runtime::timings::ExecuteTimings,
solana_runtime::{
bank::{Bank, LoadAndExecuteTransactionsOutput},
transaction_batch::TransactionBatch,
},
solana_sdk::{
clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},
- feature_set, saturating_add_assign,
+ feature_set,
+ pubkey::Pubkey,
+ saturating_add_assign,
timing::timestamp,
transaction::{self, AddressLoader, SanitizedTransaction, TransactionError},
},
+ solana_transaction_status::PreBalanceInfo,
std::{
+ collections::HashSet,
sync::{atomic::Ordering, Arc},
time::Instant,
},
@@ -71,6 +75,8 @@ pub struct Consumer {
transaction_recorder: TransactionRecorder,
qos_service: QosService,
log_messages_bytes_limit: Option<usize>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
}
impl Consumer {
@@ -79,12 +85,16 @@ impl Consumer {
transaction_recorder: TransactionRecorder,
qos_service: QosService,
log_messages_bytes_limit: Option<usize>,
+ blacklisted_accounts: HashSet<Pubkey>,
+ bundle_account_locker: BundleAccountLocker,
) -> Self {
Self {
committer,
transaction_recorder,
qos_service,
log_messages_bytes_limit,
+ blacklisted_accounts,
+ bundle_account_locker,
}
}
@@ -114,6 +124,7 @@ impl Consumer {
packets_to_process,
)
},
+ &self.blacklisted_accounts,
);
if reached_end_of_slot {
@@ -444,20 +455,26 @@ impl Consumer {
cost_model_us,
) = measure_us!(self.qos_service.select_and_accumulate_transaction_costs(
bank,
+ &mut bank.write_cost_tracker().unwrap(),
txs,
pre_results
));
// Only lock accounts for those transactions are selected for the block;
// Once accounts are locked, other threads cannot encode transactions that will modify the
- // same account state
+ // same account state.
+ // BundleAccountLocker is used to prevent race conditions with bundled transactions from bundle stage
+ let bundle_account_locks = self.bundle_account_locker.account_locks();
let (batch, lock_us) = measure_us!(bank.prepare_sanitized_batch_with_results(
txs,
transaction_qos_cost_results.iter().map(|r| match r {
Ok(_cost) => Ok(()),
Err(err) => Err(err.clone()),
- })
+ }),
+ &bundle_account_locks.read_locks(),
+ &bundle_account_locks.write_locks()
));
+ drop(bundle_account_locks);
// retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit
// WouldExceedMaxAccountCostLimit, WouldExceedMaxVoteCostLimit
@@ -502,8 +519,9 @@ impl Consumer {
.iter_mut()
.for_each(|x| *x += chunk_offset);
- let (cu, us) =
- Self::accumulate_execute_units_and_time(&execute_and_commit_timings.execute_timings);
+ let (cu, us) = execute_and_commit_timings
+ .execute_timings
+ .accumulate_execute_units_and_time();
self.qos_service.accumulate_actual_execute_cu(cu);
self.qos_service.accumulate_actual_execute_time(us);
@@ -540,7 +558,7 @@ impl Consumer {
if transaction_status_sender_enabled {
pre_balance_info.native = bank.collect_balances(batch);
pre_balance_info.token =
- collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals)
+ collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals, None)
}
});
execute_and_commit_timings.collect_balances_us = collect_balances_us;
@@ -594,7 +612,7 @@ impl Consumer {
let (record_transactions_summary, record_us) = measure_us!(self
.transaction_recorder
- .record_transactions(bank.slot(), executed_transactions));
+ .record_transactions(bank.slot(), vec![executed_transactions]));
execute_and_commit_timings.record_us = record_us;
let RecordTransactionsSummary {
@@ -678,18 +696,6 @@ impl Consumer {
}
}
- fn accumulate_execute_units_and_time(execute_timings: &ExecuteTimings) -> (u64, u64) {
- execute_timings.details.per_program_timings.values().fold(
- (0, 0),
- |(units, times), program_timings| {
- (
- units.saturating_add(program_timings.accumulated_units),
- times.saturating_add(program_timings.accumulated_us),
- )
- },
- )
- }
-
/// This function filters pending packets that are still valid
/// # Arguments
/// * `transactions` - a batch of transactions deserialized from packets
@@ -755,7 +761,7 @@ mod tests {
},
solana_perf::packet::Packet,
solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry},
- solana_program_runtime::timings::ProgramTiming,
+ solana_program_runtime::timings::{ExecuteTimings, ProgramTiming},
solana_rpc::transaction_status_service::TransactionStatusService,
solana_runtime::prioritization_fee_cache::PrioritizationFeeCache,
solana_sdk::{
@@ -818,7 +824,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_summary =
consumer.process_transactions(&bank, &Instant::now(), &transactions);
@@ -974,7 +987,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -999,7 +1019,13 @@ mod tests {
let mut done = false;
// read entries until I find mine, might be ticks...
- while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
+ while let Ok(WorkingBankEntry {
+ bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert!(entries_ticks.len() == 1);
+ let entry = &entries_ticks.get(0).unwrap().0;
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
@@ -1101,7 +1127,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1187,7 +1220,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let get_block_cost = || bank.read_cost_tracker().unwrap().block_cost();
let get_tx_count = || bank.read_cost_tracker().unwrap().transaction_count();
@@ -1339,7 +1379,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_batch_output =
consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1536,7 +1583,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder.clone(), QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder.clone(),
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let process_transactions_summary =
consumer.process_transactions(&bank, &Instant::now(), &transactions);
@@ -1661,7 +1715,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let _ = consumer.process_and_record_transactions(&bank, &transactions, 0);
@@ -1798,7 +1859,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
let _ = consumer.process_and_record_transactions(&bank, &[sanitized_tx.clone()], 0);
@@ -1858,7 +1926,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed (consume will not be called)
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -1936,7 +2011,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -1988,7 +2070,14 @@ mod tests {
replay_vote_sender,
Arc::new(PrioritizationFeeCache::new(0u64)),
);
- let consumer = Consumer::new(committer, recorder, QosService::new(1), None);
+ let consumer = Consumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ HashSet::default(),
+ BundleAccountLocker::default(),
+ );
// When the working bank in poh_recorder is None, no packets should be processed (consume will not be called)
assert!(!poh_recorder.read().unwrap().has_bank());
@@ -2076,7 +2165,7 @@ mod tests {
expected_units += n * 1000;
}
- let (units, us) = Consumer::accumulate_execute_units_and_time(&execute_timings);
+ let (units, us) = execute_timings.accumulate_execute_units_and_time();
assert_eq!(expected_units, units);
assert_eq!(expected_us, us);
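The common thread in these Consumer changes: before locking a batch, banking stage now consults the read/write lock sets held by bundle stage and skips any transaction touching a bundle-locked account rather than racing it. A toy sketch of that filtering idea, with a plain HashSet standing in for BundleAccountLocker's lock tables:

    use std::collections::HashSet;

    fn filter_against_bundle_locks(
        txs: &[(u64, Vec<&'static str>)], // (tx id, accounts it writes)
        bundle_write_locks: &HashSet<&'static str>,
    ) -> Vec<u64> {
        txs.iter()
            .filter(|(_, accounts)| accounts.iter().all(|a| !bundle_write_locks.contains(a)))
            .map(|(id, _)| *id)
            .collect()
    }

    fn main() {
        let locks: HashSet<&'static str> = ["tip_payment_acct"].into_iter().collect();
        let txs = vec![(1, vec!["alice"]), (2, vec!["tip_payment_acct"])];
        // tx 2 would race the bundle's write lock, so only tx 1 survives
        assert_eq!(filter_against_bundle_locks(&txs, &locks), vec![1]);
    }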
diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs
index 10772b74dee3cc..f81a3ca7d85e21 100644
--- a/core/src/banking_stage/latest_unprocessed_votes.rs
+++ b/core/src/banking_stage/latest_unprocessed_votes.rs
@@ -136,7 +136,7 @@ pub(crate) fn weighted_random_order_by_stake<'a>(
}
#[derive(Default, Debug)]
-pub(crate) struct VoteBatchInsertionMetrics {
+pub struct VoteBatchInsertionMetrics {
pub(crate) num_dropped_gossip: usize,
pub(crate) num_dropped_tpu: usize,
}
diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs
index abac9c70f854f1..70e1ca16994415 100644
--- a/core/src/banking_stage/qos_service.rs
+++ b/core/src/banking_stage/qos_service.rs
@@ -5,7 +5,9 @@
use {
super::{committer::CommitTransactionDetails, BatchedTransactionDetails},
- solana_cost_model::{cost_model::CostModel, transaction_cost::TransactionCost},
+ solana_cost_model::{
+ cost_model::CostModel, cost_tracker::CostTracker, transaction_cost::TransactionCost,
+ },
solana_measure::measure::Measure,
solana_runtime::bank::Bank,
solana_sdk::{
@@ -40,6 +42,7 @@ impl QosService {
pub fn select_and_accumulate_transaction_costs(
&self,
bank: &Bank,
+ cost_tracker: &mut CostTracker, // caller should pass in &mut bank.write_cost_tracker().unwrap()
transactions: &[SanitizedTransaction],
pre_results: impl Iterator<Item = transaction::Result<()>>,
) -> (Vec<transaction::Result<TransactionCost>>, usize) {
@@ -48,7 +51,8 @@ impl QosService {
let (transactions_qos_cost_results, num_included) = self.select_transactions_per_cost(
transactions.iter(),
transaction_costs.into_iter(),
- bank,
+ bank.slot(),
+ cost_tracker,
);
self.accumulate_estimated_transaction_costs(&Self::accumulate_batched_transaction_costs(
transactions_qos_cost_results.iter(),
@@ -94,10 +98,10 @@ impl QosService {
&self,
transactions: impl Iterator<Item = &'a SanitizedTransaction>,
transactions_costs: impl Iterator<Item = transaction::Result<TransactionCost>>,
- bank: &Bank,
+ slot: Slot,
+ cost_tracker: &mut CostTracker,
) -> (Vec<transaction::Result<TransactionCost>>, usize) {
let mut cost_tracking_time = Measure::start("cost_tracking_time");
- let mut cost_tracker = bank.write_cost_tracker().unwrap();
let mut num_included = 0;
let select_results = transactions.zip(transactions_costs)
.map(|(tx, cost)| {
@@ -105,13 +109,13 @@ impl QosService {
Ok(cost) => {
match cost_tracker.try_add(&cost) {
Ok(current_block_cost) => {
- debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", bank.slot(), tx, cost, current_block_cost);
+ debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", slot, tx, cost, current_block_cost);
self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed);
num_included += 1;
Ok(cost)
},
Err(e) => {
- debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e);
+ debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", slot, tx, cost, e);
Err(TransactionError::from(e))
}
}
@@ -683,8 +687,12 @@ mod tests {
bank.write_cost_tracker()
.unwrap()
.set_limits(cost_limit, cost_limit, cost_limit);
- let (results, num_selected) =
- qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank);
+ let (results, num_selected) = qos_service.select_transactions_per_cost(
+ txs.iter(),
+ txs_costs.into_iter(),
+ bank.slot(),
+ &mut bank.write_cost_tracker().unwrap(),
+ );
assert_eq!(num_selected, 2);
// verify that first transfer tx and first vote are allowed
@@ -725,8 +733,12 @@ mod tests {
.iter()
.map(|cost| cost.as_ref().unwrap().sum())
.sum();
- let (qos_cost_results, _num_included) =
- qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank);
+ let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost(
+ txs.iter(),
+ txs_costs.into_iter(),
+ bank.slot(),
+ &mut bank.write_cost_tracker().unwrap(),
+ );
assert_eq!(
total_txs_cost,
bank.read_cost_tracker().unwrap().block_cost()
@@ -793,8 +805,12 @@ mod tests {
.iter()
.map(|cost| cost.as_ref().unwrap().sum())
.sum();
- let (qos_cost_results, _num_included) =
- qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank);
+ let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost(
+ txs.iter(),
+ txs_costs.into_iter(),
+ bank.slot(),
+ &mut bank.write_cost_tracker().unwrap(),
+ );
assert_eq!(
total_txs_cost,
bank.read_cost_tracker().unwrap().block_cost()
@@ -847,8 +863,12 @@ mod tests {
.iter()
.map(|cost| cost.as_ref().unwrap().sum())
.sum();
- let (qos_cost_results, _num_included) =
- qos_service.select_transactions_per_cost(txs.iter(), txs_costs.into_iter(), &bank);
+ let (qos_cost_results, _num_included) = qos_service.select_transactions_per_cost(
+ txs.iter(),
+ txs_costs.into_iter(),
+ bank.slot(),
+ &mut bank.write_cost_tracker().unwrap(),
+ );
assert_eq!(
total_txs_cost,
bank.read_cost_tracker().unwrap().block_cost()
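The point of threading &mut CostTracker through QoS instead of letting it take the bank's lock internally is lock scope: the caller acquires bank.write_cost_tracker() once and holds it across selection, so bundle stage can reserve cost for a whole bundle atomically. A toy model of why the single &mut borrow makes batch selection atomic:

    struct CostTracker {
        block_cost: u64,
        limit: u64,
    }

    impl CostTracker {
        fn try_add(&mut self, cost: u64) -> Result<u64, ()> {
            if self.block_cost + cost > self.limit {
                return Err(()); // would exceed the block's cost budget
            }
            self.block_cost += cost;
            Ok(self.block_cost)
        }
    }

    // One &mut borrow for the whole batch: no other thread can interleave additions.
    fn select_per_cost(costs: &[u64], tracker: &mut CostTracker) -> usize {
        costs.iter().filter(|c| tracker.try_add(**c).is_ok()).count()
    }

    fn main() {
        let mut tracker = CostTracker { block_cost: 0, limit: 10 };
        assert_eq!(select_per_cost(&[4, 4, 4], &mut tracker), 2);
    }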
diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs
index 80ce0875323819..7fb57c20732208 100644
--- a/core/src/banking_stage/unprocessed_transaction_storage.rs
+++ b/core/src/banking_stage/unprocessed_transaction_storage.rs
@@ -14,17 +14,29 @@ use {
},
BankingStageStats, FilterForwardingResults, ForwardOption,
},
+ crate::{
+ bundle_stage::bundle_stage_leader_metrics::BundleStageLeaderMetrics,
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ },
itertools::Itertools,
min_max_heap::MinMaxHeap,
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_bundle::{bundle_execution::LoadAndExecuteBundleError, BundleExecutionError},
solana_measure::measure,
solana_runtime::bank::Bank,
solana_sdk::{
- clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash,
- saturating_add_assign, transaction::SanitizedTransaction,
+ bundle::SanitizedBundle,
+ clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET},
+ feature_set::FeatureSet,
+ hash::Hash,
+ pubkey::Pubkey,
+ saturating_add_assign,
+ transaction::SanitizedTransaction,
},
std::{
- collections::HashMap,
+ collections::{HashMap, HashSet, VecDeque},
sync::{atomic::Ordering, Arc},
+ time::Instant,
},
};
@@ -39,6 +51,7 @@ const MAX_NUM_VOTES_RECEIVE: usize = 10_000;
pub enum UnprocessedTransactionStorage {
VoteStorage(VoteStorage),
LocalTransactionStorage(ThreadLocalUnprocessedPackets),
+ BundleStorage(BundleStorage),
}
#[derive(Debug)]
@@ -57,10 +70,11 @@ pub struct VoteStorage {
pub enum ThreadType {
Voting(VoteSource),
Transactions,
+ Bundles,
}
#[derive(Debug)]
-pub(crate) enum InsertPacketBatchSummary {
+pub enum InsertPacketBatchSummary {
VoteBatchInsertionMetrics(VoteBatchInsertionMetrics),
PacketBatchInsertionMetrics(PacketBatchInsertionMetrics),
}
@@ -143,6 +157,7 @@ fn consume_scan_should_process_packet(
banking_stage_stats: &BankingStageStats,
packet: &ImmutableDeserializedPacket,
payload: &mut ConsumeScannerPayload,
+ blacklisted_accounts: &HashSet<Pubkey>,
) -> ProcessingDecision {
// If end of the slot, return should process (quick loop after reached end of slot)
if payload.reached_end_of_slot {
@@ -177,6 +192,10 @@ fn consume_scan_should_process_packet(
bank.get_transaction_account_lock_limit(),
)
.is_err()
+ || message
+ .account_keys()
+ .iter()
+ .any(|key| blacklisted_accounts.contains(key))
{
payload
.message_hash_to_transaction
@@ -245,10 +264,24 @@ impl UnprocessedTransactionStorage {
})
}
+ pub fn new_bundle_storage(
+ unprocessed_bundle_storage: VecDeque<ImmutableDeserializedBundle>,
+ cost_model_failed_bundles: VecDeque<ImmutableDeserializedBundle>,
+ ) -> Self {
+ Self::BundleStorage(BundleStorage {
+ last_update_slot: Slot::default(),
+ unprocessed_bundle_storage,
+ cost_model_buffered_bundle_storage: cost_model_failed_bundles,
+ })
+ }
+
pub fn is_empty(&self) -> bool {
match self {
Self::VoteStorage(vote_storage) => vote_storage.is_empty(),
Self::LocalTransactionStorage(transaction_storage) => transaction_storage.is_empty(),
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ bundle_storage.is_empty()
+ }
}
}
@@ -256,6 +289,10 @@ impl UnprocessedTransactionStorage {
match self {
Self::VoteStorage(vote_storage) => vote_storage.len(),
Self::LocalTransactionStorage(transaction_storage) => transaction_storage.len(),
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ bundle_storage.unprocessed_bundles_len()
+ + bundle_storage.cost_model_buffered_bundles_len()
+ }
}
}
@@ -266,6 +303,9 @@ impl UnprocessedTransactionStorage {
Self::LocalTransactionStorage(transaction_storage) => {
transaction_storage.max_receive_size()
}
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ bundle_storage.max_receive_size()
+ }
}
}
@@ -292,6 +332,9 @@ impl UnprocessedTransactionStorage {
Self::LocalTransactionStorage(transaction_storage) => {
transaction_storage.forward_option()
}
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ bundle_storage.forward_option()
+ }
}
}
@@ -299,6 +342,16 @@ impl UnprocessedTransactionStorage {
match self {
Self::LocalTransactionStorage(transaction_storage) => transaction_storage.clear(), // Since we set everything as forwarded this is the same
Self::VoteStorage(vote_storage) => vote_storage.clear_forwarded_packets(),
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ let _ = bundle_storage.reset();
+ }
+ }
+ }
+
+ pub fn bundle_storage(&mut self) -> Option<&mut BundleStorage> {
+ match self {
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => Some(bundle_storage),
+ _ => None,
}
}
@@ -313,6 +366,11 @@ impl UnprocessedTransactionStorage {
Self::LocalTransactionStorage(transaction_storage) => InsertPacketBatchSummary::from(
transaction_storage.insert_batch(deserialized_packets),
),
+ UnprocessedTransactionStorage::BundleStorage(_) => {
+ panic!(
+ "bundles must be inserted using UnprocessedTransactionStorage::insert_bundle"
+ )
+ }
}
}
@@ -332,6 +390,9 @@ impl UnprocessedTransactionStorage {
bank,
forward_packet_batches_by_accounts,
),
+ UnprocessedTransactionStorage::BundleStorage(_) => {
+ panic!("bundles are not forwarded between leaders")
+ }
}
}
@@ -345,6 +406,7 @@ impl UnprocessedTransactionStorage {
banking_stage_stats: &BankingStageStats,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
processing_function: F,
+ blacklisted_accounts: &HashSet<Pubkey>,
) -> bool
where
F: FnMut(
@@ -359,15 +421,62 @@ impl UnprocessedTransactionStorage {
banking_stage_stats,
slot_metrics_tracker,
processing_function,
+ blacklisted_accounts,
),
Self::VoteStorage(vote_storage) => vote_storage.process_packets(
bank,
banking_stage_stats,
slot_metrics_tracker,
processing_function,
+ blacklisted_accounts,
+ ),
+ UnprocessedTransactionStorage::BundleStorage(_) => panic!(
+ "UnprocessedTransactionStorage::BundleStorage does not support processing packets"
),
}
}
+
+ #[must_use]
+ pub fn process_bundles(
+ &mut self,
+ bank: Arc<Bank>,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ blacklisted_accounts: &HashSet<Pubkey>,
+ processing_function: F,
+ ) -> bool
+ where
+ F: FnMut(
+ &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ &mut BundleStageLeaderMetrics,
+ ) -> Vec<Result<(), BundleExecutionError>>,
+ {
+ match self {
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => bundle_storage
+ .process_bundles(
+ bank,
+ bundle_stage_leader_metrics,
+ blacklisted_accounts,
+ processing_function,
+ ),
+ _ => panic!("class does not support processing bundles"),
+ }
+ }
+
+ /// Inserts bundles into storage. Only supported for UnprocessedTransactionStorage::BundleStorage
+ pub(crate) fn insert_bundles(
+ &mut self,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ ) -> InsertPacketBundlesSummary {
+ match self {
+ UnprocessedTransactionStorage::BundleStorage(bundle_storage) => {
+ bundle_storage.insert_unprocessed_bundles(deserialized_bundles, true)
+ }
+ UnprocessedTransactionStorage::LocalTransactionStorage(_)
+ | UnprocessedTransactionStorage::VoteStorage(_) => {
+ panic!("UnprocessedTransactionStorage::insert_bundles only works for type UnprocessedTransactionStorage::BundleStorage");
+ }
+ }
+ }
}
impl VoteStorage {
@@ -436,6 +545,7 @@ impl VoteStorage {
banking_stage_stats: &BankingStageStats,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
mut processing_function: F,
+ blacklisted_accounts: &HashSet<Pubkey>,
) -> bool
where
F: FnMut(
@@ -449,7 +559,13 @@ impl VoteStorage {
let should_process_packet =
|packet: &Arc<ImmutableDeserializedPacket>, payload: &mut ConsumeScannerPayload| {
- consume_scan_should_process_packet(&bank, banking_stage_stats, packet, payload)
+ consume_scan_should_process_packet(
+ &bank,
+ banking_stage_stats,
+ packet,
+ payload,
+ blacklisted_accounts,
+ )
};
// Based on the stake distribution present in the supplied bank, drain the unprocessed votes
@@ -524,6 +640,7 @@ impl ThreadLocalUnprocessedPackets {
ThreadType::Transactions => ForwardOption::ForwardTransaction,
ThreadType::Voting(VoteSource::Tpu) => ForwardOption::ForwardTpuVote,
ThreadType::Voting(VoteSource::Gossip) => ForwardOption::NotForward,
+ ThreadType::Bundles => panic!(), // TODO (LB)
}
}
@@ -848,6 +965,7 @@ impl ThreadLocalUnprocessedPackets {
banking_stage_stats: &BankingStageStats,
slot_metrics_tracker: &mut LeaderSlotMetricsTracker,
mut processing_function: F,
+ blacklisted_accounts: &HashSet<Pubkey>,
) -> bool
where
F: FnMut(
@@ -862,7 +980,13 @@ impl ThreadLocalUnprocessedPackets {
let should_process_packet =
 |packet: &Arc<ImmutableDeserializedPacket>, payload: &mut ConsumeScannerPayload| {
- consume_scan_should_process_packet(bank, banking_stage_stats, packet, payload)
+ consume_scan_should_process_packet(
+ bank,
+ banking_stage_stats,
+ packet,
+ payload,
+ blacklisted_accounts,
+ )
};
let mut scanner = create_consume_multi_iterator(
&all_packets_to_process,
@@ -939,6 +1063,323 @@ impl ThreadLocalUnprocessedPackets {
}
}
+pub struct InsertPacketBundlesSummary {
+ pub insert_packets_summary: InsertPacketBatchSummary,
+ pub num_bundles_inserted: usize,
+ pub num_packets_inserted: usize,
+ pub num_bundles_dropped: usize,
+}
+
+/// Bundle storage has two deques: one for unprocessed bundles and another for ones that exceeded
+/// the cost model and need to get retried next slot.
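+/// Bundles that fail with `BundleExecutionError::ExceedsCostModel` land in the
+/// second deque and are drained back in at the start of the next slot by
+/// `drain_and_sanitize_bundles`.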
+#[derive(Debug)]
+pub struct BundleStorage {
+ last_update_slot: Slot,
+ unprocessed_bundle_storage: VecDeque<ImmutableDeserializedBundle>,
+ // Storage for bundles that exceeded the cost model for the slot they were last attempted
+ // execution on
+ cost_model_buffered_bundle_storage: VecDeque<ImmutableDeserializedBundle>,
+}
+
+impl BundleStorage {
+ fn is_empty(&self) -> bool {
+ self.unprocessed_bundle_storage.is_empty()
+ }
+
+ pub fn unprocessed_bundles_len(&self) -> usize {
+ self.unprocessed_bundle_storage.len()
+ }
+
+ pub fn unprocessed_packets_len(&self) -> usize {
+ self.unprocessed_bundle_storage
+ .iter()
+ .map(|b| b.len())
+ .sum::<usize>()
+ }
+
+ pub(crate) fn cost_model_buffered_bundles_len(&self) -> usize {
+ self.cost_model_buffered_bundle_storage.len()
+ }
+
+ pub(crate) fn cost_model_buffered_packets_len(&self) -> usize {
+ self.cost_model_buffered_bundle_storage
+ .iter()
+ .map(|b| b.len())
+ .sum()
+ }
+
+ pub(crate) fn max_receive_size(&self) -> usize {
+ self.unprocessed_bundle_storage.capacity() - self.unprocessed_bundle_storage.len()
+ }
+
+ fn forward_option(&self) -> ForwardOption {
+ ForwardOption::NotForward
+ }
+
+ /// Resets the storage, returning the number of unprocessed bundles and cost-model-buffered bundles cleared
+ pub fn reset(&mut self) -> (usize, usize) {
+ let num_unprocessed_bundles = self.unprocessed_bundle_storage.len();
+ let num_cost_model_buffered_bundles = self.cost_model_buffered_bundle_storage.len();
+ self.unprocessed_bundle_storage.clear();
+ self.cost_model_buffered_bundle_storage.clear();
+ (num_unprocessed_bundles, num_cost_model_buffered_bundles)
+ }
+
+ fn insert_bundles(
+ deque: &mut VecDeque<ImmutableDeserializedBundle>,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ push_back: bool,
+ ) -> InsertPacketBundlesSummary {
+ let mut num_bundles_inserted: usize = 0;
+ let mut num_packets_inserted: usize = 0;
+ let mut num_bundles_dropped: usize = 0;
+ let mut num_packets_dropped: usize = 0;
+
+ for bundle in deserialized_bundles {
+ if deque.capacity() == deque.len() {
+ saturating_add_assign!(num_bundles_dropped, 1);
+ saturating_add_assign!(num_packets_dropped, bundle.len());
+ } else {
+ saturating_add_assign!(num_bundles_inserted, 1);
+ saturating_add_assign!(num_packets_inserted, bundle.len());
+ if push_back {
+ deque.push_back(bundle);
+ } else {
+ deque.push_front(bundle)
+ }
+ }
+ }
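+ // e.g. if the deque above was already at capacity (len == capacity), every
+ // incoming bundle is dropped: inserting bundles b3 and b4 would yield
+ // num_bundles_dropped == 2 and num_packets_dropped == b3.len() + b4.len().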
+
+ InsertPacketBundlesSummary {
+ insert_packets_summary: PacketBatchInsertionMetrics {
+ num_dropped_packets: num_packets_dropped,
+ num_dropped_tracer_packets: 0,
+ }
+ .into(),
+ num_bundles_inserted,
+ num_packets_inserted,
+ num_bundles_dropped,
+ }
+ }
+
+ fn push_front_unprocessed_bundles(
+ &mut self,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ ) -> InsertPacketBundlesSummary {
+ Self::insert_bundles(
+ &mut self.unprocessed_bundle_storage,
+ deserialized_bundles,
+ false,
+ )
+ }
+
+ fn push_back_cost_model_buffered_bundles(
+ &mut self,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ ) -> InsertPacketBundlesSummary {
+ Self::insert_bundles(
+ &mut self.cost_model_buffered_bundle_storage,
+ deserialized_bundles,
+ true,
+ )
+ }
+
+ fn insert_unprocessed_bundles(
+ &mut self,
+ deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ push_back: bool,
+ ) -> InsertPacketBundlesSummary {
+ Self::insert_bundles(
+ &mut self.unprocessed_bundle_storage,
+ deserialized_bundles,
+ push_back,
+ )
+ }
+
+ /// Drains bundles from the queue, sanitizes them to prepare for execution, executes them by
+ /// calling `processing_function`, then potentially rebuffers them.
+ pub fn process_bundles(
+ &mut self,
+ bank: Arc<Bank>,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ blacklisted_accounts: &HashSet<Pubkey>,
+ mut processing_function: F,
+ ) -> bool
+ where
+ F: FnMut(
+ &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ &mut BundleStageLeaderMetrics,
+ ) -> Vec<Result<(), BundleExecutionError>>,
+ {
+ let sanitized_bundles = self.drain_and_sanitize_bundles(
+ bank,
+ bundle_stage_leader_metrics,
+ blacklisted_accounts,
+ );
+
+ debug!("processing {} bundles", sanitized_bundles.len());
+ let bundle_execution_results =
+ processing_function(&sanitized_bundles, bundle_stage_leader_metrics);
+
+ let mut is_slot_over = false;
+
+ let mut rebuffered_bundles = Vec::new();
+
+ sanitized_bundles
+ .into_iter()
+ .zip(bundle_execution_results)
+ .for_each(
+ |((deserialized_bundle, sanitized_bundle), result)| match result {
+ Ok(_) => {
+ debug!("bundle={} executed ok", sanitized_bundle.bundle_id);
+ // yippee
+ }
+ Err(BundleExecutionError::PohRecordError(e)) => {
+ // buffer the bundle to the front of the queue to be attempted next slot
+ debug!(
+ "bundle={} poh record error: {e:?}",
+ sanitized_bundle.bundle_id
+ );
+ rebuffered_bundles.push(deserialized_bundle);
+ is_slot_over = true;
+ }
+ Err(BundleExecutionError::BankProcessingTimeLimitReached) => {
+ // buffer the bundle to the front of the queue to be attempted next slot
+ debug!("bundle={} bank processing done", sanitized_bundle.bundle_id);
+ rebuffered_bundles.push(deserialized_bundle);
+ is_slot_over = true;
+ }
+ Err(BundleExecutionError::ExceedsCostModel) => {
+ // cost model buffered bundles contain most recent bundles at the front of the queue
+ debug!(
+ "bundle={} exceeds cost model, rebuffering",
+ sanitized_bundle.bundle_id
+ );
+ self.push_back_cost_model_buffered_bundles(vec![deserialized_bundle]);
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_),
+ )) => {
+ // these are treated the same as exceeds cost model and are rebuffered to be completed
+ // at the beginning of the next slot
+ debug!(
+ "bundle={} processing time exceeded, rebuffering",
+ sanitized_bundle.bundle_id
+ );
+ self.push_back_cost_model_buffered_bundles(vec![deserialized_bundle]);
+ }
+ Err(BundleExecutionError::TransactionFailure(e)) => {
+ debug!(
+ "bundle={} execution error: {:?}",
+ sanitized_bundle.bundle_id, e
+ );
+ // do nothing
+ }
+ Err(BundleExecutionError::TipError(e)) => {
+ debug!("bundle={} tip error: {}", sanitized_bundle.bundle_id, e);
+ // Tip errors are _typically_ due to misconfiguration (except for poh record error, bank processing error, exceeds cost model)
+ // in order to prevent buffering too many bundles, we'll just drop the bundle
+ }
+ Err(BundleExecutionError::LockError) => {
+ // lock errors are irrecoverable due to malformed transactions
+ debug!("bundle={} lock error", sanitized_bundle.bundle_id);
+ }
+ },
+ );
+
+ // rebuffered bundles are pushed onto deque in reverse order so the first bundle is at the front
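+ // (e.g. rebuffered_bundles == [b0, b1]: b1 is pushed to the front first, then
+ // b0, leaving the deque ordered [b0, b1, ..] and preserving execution order)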
+ for bundle in rebuffered_bundles.into_iter().rev() {
+ self.push_front_unprocessed_bundles(vec![bundle]);
+ }
+
+ is_slot_over
+ }
+
+ /// Drains the unprocessed_bundle_storage, converting bundle packets into SanitizedBundles
+ fn drain_and_sanitize_bundles(
+ &mut self,
+ bank: Arc<Bank>,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ blacklisted_accounts: &HashSet<Pubkey>,
+ ) -> Vec<(ImmutableDeserializedBundle, SanitizedBundle)> {
+ let mut error_metrics = TransactionErrorMetrics::default();
+
+ let start = Instant::now();
+
+ let mut sanitized_bundles = Vec::new();
+
+ // on new slot, drain anything that was buffered from last slot
+ if bank.slot() != self.last_update_slot {
+ sanitized_bundles.extend(
+ self.cost_model_buffered_bundle_storage
+ .drain(..)
+ .filter_map(|packet_bundle| {
+ let r = packet_bundle.build_sanitized_bundle(
+ &bank,
+ blacklisted_accounts,
+ &mut error_metrics,
+ );
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_sanitize_transaction_result(&r);
+
+ match r {
+ Ok(sanitized_bundle) => Some((packet_bundle, sanitized_bundle)),
+ Err(e) => {
+ debug!(
+ "bundle id: {} error sanitizing: {}",
+ packet_bundle.bundle_id(),
+ e
+ );
+ None
+ }
+ }
+ }),
+ );
+
+ self.last_update_slot = bank.slot();
+ }
+
+ sanitized_bundles.extend(self.unprocessed_bundle_storage.drain(..).filter_map(
+ |packet_bundle| {
+ let r = packet_bundle.build_sanitized_bundle(
+ &bank,
+ blacklisted_accounts,
+ &mut error_metrics,
+ );
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_sanitize_transaction_result(&r);
+ match r {
+ Ok(sanitized_bundle) => Some((packet_bundle, sanitized_bundle)),
+ Err(e) => {
+ debug!(
+ "bundle id: {} error sanitizing: {}",
+ packet_bundle.bundle_id(),
+ e
+ );
+ None
+ }
+ }
+ },
+ ));
+
+ let elapsed = start.elapsed().as_micros();
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_sanitize_bundle_elapsed_us(elapsed as u64);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_transactions_from_packets_us(elapsed as u64);
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_transaction_errors(&error_metrics);
+
+ sanitized_bundles
+ }
+}
+
#[cfg(test)]
mod tests {
use {
diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs
index ba76b794ba2919..bedfe117dc1518 100644
--- a/core/src/banking_trace.rs
+++ b/core/src/banking_trace.rs
@@ -315,6 +315,7 @@ impl BankingTracer {
}
}
+#[derive(Clone)]
pub struct TracedSender {
label: ChannelLabel,
 sender: Sender<BankingPacketBatch>,
diff --git a/core/src/bundle_stage.rs b/core/src/bundle_stage.rs
new file mode 100644
index 00000000000000..de8dad38c7f882
--- /dev/null
+++ b/core/src/bundle_stage.rs
@@ -0,0 +1,436 @@
+//! The `bundle_stage` processes bundles, which are lists of transactions to be executed
+//! sequentially and atomically.
+use {
+ crate::{
+ banking_stage::{
+ decision_maker::{BufferedPacketsDecision, DecisionMaker},
+ qos_service::QosService,
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ bundle_stage::{
+ bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer,
+ bundle_packet_receiver::BundleReceiver,
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer,
+ },
+ packet_bundle::PacketBundle,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ tip_manager::TipManager,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS,
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_ledger::blockstore_processor::TransactionStatusSender,
+ solana_measure::measure,
+ solana_poh::poh_recorder::PohRecorder,
+ solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache},
+ solana_sdk::timing::AtomicInterval,
+ solana_vote::vote_sender_types::ReplayVoteSender,
+ std::{
+ collections::VecDeque,
+ sync::{
+ atomic::{AtomicBool, AtomicU64, Ordering},
+ Arc, Mutex, RwLock,
+ },
+ thread::{self, Builder, JoinHandle},
+ time::{Duration, Instant},
+ },
+};
+
+pub mod bundle_account_locker;
+mod bundle_consumer;
+mod bundle_packet_deserializer;
+mod bundle_packet_receiver;
+mod bundle_reserved_space_manager;
+pub(crate) mod bundle_stage_leader_metrics;
+mod committer;
+
+const MAX_BUNDLE_RETRY_DURATION: Duration = Duration::from_millis(40);
+const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10);
+
+// Stats emitted periodically
+#[derive(Default)]
+pub struct BundleStageLoopMetrics {
+ last_report: AtomicInterval,
+ id: u32,
+
+ // total received
+ num_bundles_received: AtomicU64,
+ num_packets_received: AtomicU64,
+
+ // newly buffered
+ newly_buffered_bundles_count: AtomicU64,
+
+ // currently buffered
+ current_buffered_bundles_count: AtomicU64,
+ current_buffered_packets_count: AtomicU64,
+
+ // buffered due to cost model
+ cost_model_buffered_bundles_count: AtomicU64,
+ cost_model_buffered_packets_count: AtomicU64,
+
+ // number of bundles dropped during insertion
+ num_bundles_dropped: AtomicU64,
+
+ // timings
+ receive_and_buffer_bundles_elapsed_us: AtomicU64,
+ process_buffered_bundles_elapsed_us: AtomicU64,
+}
+
+impl BundleStageLoopMetrics {
+ fn new(id: u32) -> Self {
+ BundleStageLoopMetrics {
+ id,
+ ..BundleStageLoopMetrics::default()
+ }
+ }
+
+ pub fn increment_num_bundles_received(&mut self, count: u64) {
+ self.num_bundles_received
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_num_packets_received(&mut self, count: u64) {
+ self.num_packets_received
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_newly_buffered_bundles_count(&mut self, count: u64) {
+ self.newly_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_current_buffered_bundles_count(&mut self, count: u64) {
+ self.current_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_current_buffered_packets_count(&mut self, count: u64) {
+ self.current_buffered_packets_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_cost_model_buffered_bundles_count(&mut self, count: u64) {
+ self.cost_model_buffered_bundles_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_cost_model_buffered_packets_count(&mut self, count: u64) {
+ self.cost_model_buffered_packets_count
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_num_bundles_dropped(&mut self, count: u64) {
+ self.num_bundles_dropped.fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_receive_and_buffer_bundles_elapsed_us(&mut self, count: u64) {
+ self.receive_and_buffer_bundles_elapsed_us
+ .fetch_add(count, Ordering::Relaxed);
+ }
+
+ pub fn increment_process_buffered_bundles_elapsed_us(&mut self, count: u64) {
+ self.process_buffered_bundles_elapsed_us
+ .fetch_add(count, Ordering::Relaxed);
+ }
+}
+
+impl BundleStageLoopMetrics {
+ fn maybe_report(&mut self, report_interval_ms: u64) {
+ if self.last_report.should_update(report_interval_ms) {
+ datapoint_info!(
+ "bundle_stage-loop_stats",
+ ("id", self.id, i64),
+ (
+ "num_bundles_received",
+ self.num_bundles_received.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "num_packets_received",
+ self.num_packets_received.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "newly_buffered_bundles_count",
+ self.newly_buffered_bundles_count.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "current_buffered_bundles_count",
+ self.current_buffered_bundles_count
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "current_buffered_packets_count",
+ self.current_buffered_packets_count
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "num_bundles_dropped",
+ self.num_bundles_dropped.swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "receive_and_buffer_bundles_elapsed_us",
+ self.receive_and_buffer_bundles_elapsed_us
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ (
+ "process_buffered_bundles_elapsed_us",
+ self.process_buffered_bundles_elapsed_us
+ .swap(0, Ordering::Acquire) as i64,
+ i64
+ ),
+ );
+ }
+ }
+}
+
+pub struct BundleStage {
+ bundle_thread: JoinHandle<()>,
+}
+
+impl BundleStage {
+ #[allow(clippy::new_ret_no_self)]
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ cluster_info: &Arc<ClusterInfo>,
+ poh_recorder: &Arc<RwLock<PohRecorder>>,
+ bundle_receiver: Receiver<Vec<PacketBundle>>,
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ log_messages_bytes_limit: Option<usize>,
+ exit: Arc<AtomicBool>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ preallocated_bundle_cost: u64,
+ bank_forks: Arc<RwLock<BankForks>>,
+ prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ Self::start_bundle_thread(
+ cluster_info,
+ poh_recorder,
+ bundle_receiver,
+ transaction_status_sender,
+ replay_vote_sender,
+ log_messages_bytes_limit,
+ exit,
+ tip_manager,
+ bundle_account_locker,
+ MAX_BUNDLE_RETRY_DURATION,
+ block_builder_fee_info,
+ preallocated_bundle_cost,
+ bank_forks,
+ prioritization_fee_cache,
+ )
+ }
+
+ pub fn join(self) -> thread::Result<()> {
+ self.bundle_thread.join()
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn start_bundle_thread(
+ cluster_info: &Arc<ClusterInfo>,
+ poh_recorder: &Arc<RwLock<PohRecorder>>,
+ bundle_receiver: Receiver<Vec<PacketBundle>>,
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ log_message_bytes_limit: Option<usize>,
+ exit: Arc<AtomicBool>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ max_bundle_retry_duration: Duration,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ preallocated_bundle_cost: u64,
+ bank_forks: Arc<RwLock<BankForks>>,
+ prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ const BUNDLE_STAGE_ID: u32 = 10_000;
+ let poh_recorder = poh_recorder.clone();
+ let cluster_info = cluster_info.clone();
+
+ let mut bundle_receiver =
+ BundleReceiver::new(BUNDLE_STAGE_ID, bundle_receiver, bank_forks, Some(5));
+
+ let committer = Committer::new(
+ transaction_status_sender,
+ replay_vote_sender,
+ prioritization_fee_cache.clone(),
+ );
+ let decision_maker = DecisionMaker::new(cluster_info.id(), poh_recorder.clone());
+
+ let unprocessed_bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(1_000),
+ VecDeque::with_capacity(1_000),
+ );
+
+ let reserved_ticks = poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10);
+
+ // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` fewer compute units.
+ // The last 20% has full compute so blockspace is maximized if BundleStage is idle.
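+ // e.g. with the default 64 ticks per slot, reserved_ticks = 64 * 8 / 10 = 51, so
+ // the reduced limit applies for the first 51 ticks and the last 13 ticks run
+ // against the full block limit.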
+ let reserved_space = BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ preallocated_bundle_cost,
+ reserved_ticks,
+ );
+
+ let consumer = BundleConsumer::new(
+ committer,
+ poh_recorder.read().unwrap().new_recorder(),
+ QosService::new(BUNDLE_STAGE_ID),
+ log_message_bytes_limit,
+ tip_manager,
+ bundle_account_locker,
+ block_builder_fee_info.clone(),
+ max_bundle_retry_duration,
+ cluster_info,
+ reserved_space,
+ );
+
+ let bundle_thread = Builder::new()
+ .name("solBundleStgTx".to_string())
+ .spawn(move || {
+ Self::process_loop(
+ &mut bundle_receiver,
+ decision_maker,
+ consumer,
+ BUNDLE_STAGE_ID,
+ unprocessed_bundle_storage,
+ exit,
+ );
+ })
+ .unwrap();
+
+ Self { bundle_thread }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_loop(
+ bundle_receiver: &mut BundleReceiver,
+ decision_maker: DecisionMaker,
+ mut consumer: BundleConsumer,
+ id: u32,
+ mut unprocessed_bundle_storage: UnprocessedTransactionStorage,
+ exit: Arc<AtomicBool>,
+ ) {
+ let mut last_metrics_update = Instant::now();
+
+ let mut bundle_stage_metrics = BundleStageLoopMetrics::new(id);
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(id);
+
+ while !exit.load(Ordering::Relaxed) {
+ if !unprocessed_bundle_storage.is_empty()
+ || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD
+ {
+ let (_, process_buffered_packets_time) = measure!(
+ Self::process_buffered_bundles(
+ &decision_maker,
+ &mut consumer,
+ &mut unprocessed_bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ ),
+ "process_buffered_packets",
+ );
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_buffered_packets_us(process_buffered_packets_time.as_us());
+ last_metrics_update = Instant::now();
+ }
+
+ match bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_bundle_storage,
+ &mut bundle_stage_metrics,
+ &mut bundle_stage_leader_metrics,
+ ) {
+ Ok(_) | Err(RecvTimeoutError::Timeout) => (),
+ Err(RecvTimeoutError::Disconnected) => break,
+ }
+
+ let bundle_storage = unprocessed_bundle_storage.bundle_storage().unwrap();
+ bundle_stage_metrics.increment_current_buffered_bundles_count(
+ bundle_storage.unprocessed_bundles_len() as u64,
+ );
+ bundle_stage_metrics.increment_current_buffered_packets_count(
+ bundle_storage.unprocessed_packets_len() as u64,
+ );
+ bundle_stage_metrics.increment_cost_model_buffered_bundles_count(
+ bundle_storage.cost_model_buffered_bundles_len() as u64,
+ );
+ bundle_stage_metrics.increment_cost_model_buffered_packets_count(
+ bundle_storage.cost_model_buffered_packets_len() as u64,
+ );
+ bundle_stage_metrics.maybe_report(1_000);
+ }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_buffered_bundles(
+ decision_maker: &DecisionMaker,
+ consumer: &mut BundleConsumer,
+ unprocessed_bundle_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ let (decision, make_decision_time) =
+ measure!(decision_maker.make_consume_or_forward_decision());
+
+ let (metrics_action, banking_stage_metrics_action) =
+ bundle_stage_leader_metrics.check_leader_slot_boundary(decision.bank_start());
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_make_decision_us(make_decision_time.as_us());
+
+ match decision {
+ // BufferedPacketsDecision::Consume means this leader is scheduled to be running at the moment.
+ // Execute, record, and commit as many bundles as possible given time, compute, and other constraints.
+ BufferedPacketsDecision::Consume(bank_start) => {
+ // Take metrics action before consume packets (potentially resetting the
+ // slot metrics tracker to the next slot) so that we don't count the
+ // packet processing metrics from the next slot towards the metrics
+ // of the previous slot
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+
+ let (_, consume_buffered_packets_time) = measure!(
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ unprocessed_bundle_storage,
+ bundle_stage_leader_metrics,
+ ),
+ "consume_buffered_bundles",
+ );
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us());
+ }
+ // BufferedPacketsDecision::Forward means the leader slot is far away.
+ // Bundles aren't forwarded because it breaks atomicity guarantees, so just drop them.
+ BufferedPacketsDecision::Forward => {
+ let (_num_bundles_cleared, _num_cost_model_buffered_bundles) =
+ unprocessed_bundle_storage.bundle_storage().unwrap().reset();
+
+ // TODO (LB): add metrics here for how many bundles were cleared
+
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+ }
+ // BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold means the validator
+ // is approaching the leader slot, so hold the bundles. Bundles aren't forwarded here either, since
+ // that breaks atomicity guarantees
+ BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => {
+ bundle_stage_leader_metrics
+ .apply_action(metrics_action, banking_stage_metrics_action);
+ }
+ }
+ }
+}
diff --git a/core/src/bundle_stage/bundle_account_locker.rs b/core/src/bundle_stage/bundle_account_locker.rs
new file mode 100644
index 00000000000000..5ea8b5396da9ac
--- /dev/null
+++ b/core/src/bundle_stage/bundle_account_locker.rs
@@ -0,0 +1,326 @@
+//! Handles pre-locking bundle accounts so that the accounts a bundle touches can be reserved ahead
+// of time for execution. Also, ensures that ALL accounts mentioned across a bundle are locked
+// to avoid race conditions between BundleStage and BankingStage.
+//
+// For instance, imagine a bundle with three transactions and the set of accounts for each transaction
+// is: {{A, B}, {B, C}, {C, D}}. We need to lock A, B, and C even though only one is executed at a time.
+// Imagine BundleStage is in the middle of processing {C, D} and we didn't have a lock on accounts {A, B, C}.
+// In this situation, there's a chance that BankingStage can process a transaction containing A or B
+// and commit the results before the bundle completes. By the time the bundle commits the new account
+// state for {A, B, C}, A and B would be incorrect and the entries containing the bundle would be
+// replayed improperly and that leader would have produced an invalid block.
+use {
+ solana_runtime::bank::Bank,
+ solana_sdk::{bundle::SanitizedBundle, pubkey::Pubkey, transaction::TransactionAccountLocks},
+ std::{
+ collections::{hash_map::Entry, HashMap, HashSet},
+ sync::{Arc, Mutex, MutexGuard},
+ },
+ thiserror::Error,
+};
+
+#[derive(Clone, Error, Debug)]
+pub enum BundleAccountLockerError {
+ #[error("locking error")]
+ LockingError,
+}
+
+pub type BundleAccountLockerResult<T> = Result<T, BundleAccountLockerError>;
+
+pub struct LockedBundle<'a, 'b> {
+ bundle_account_locker: &'a BundleAccountLocker,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: Arc<Bank>,
+}
+
+impl<'a, 'b> LockedBundle<'a, 'b> {
+ pub fn new(
+ bundle_account_locker: &'a BundleAccountLocker,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: &Arc<Bank>,
+ ) -> Self {
+ Self {
+ bundle_account_locker,
+ sanitized_bundle,
+ bank: bank.clone(),
+ }
+ }
+
+ pub fn sanitized_bundle(&self) -> &SanitizedBundle {
+ self.sanitized_bundle
+ }
+}
+
+// Automatically unlock bundle accounts when destructed
+impl<'a, 'b> Drop for LockedBundle<'a, 'b> {
+ fn drop(&mut self) {
+ let _ = self
+ .bundle_account_locker
+ .unlock_bundle_accounts(self.sanitized_bundle, &self.bank);
+ }
+}
+
+#[derive(Default, Clone)]
+pub struct BundleAccountLocks {
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+}
+
+impl BundleAccountLocks {
+ pub fn read_locks(&self) -> HashSet<Pubkey> {
+ self.read_locks.keys().cloned().collect()
+ }
+
+ pub fn write_locks(&self) -> HashSet<Pubkey> {
+ self.write_locks.keys().cloned().collect()
+ }
+
+ pub fn lock_accounts(
+ &mut self,
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+ ) {
+ for (acc, count) in read_locks {
+ *self.read_locks.entry(acc).or_insert(0) += count;
+ }
+ for (acc, count) in write_locks {
+ *self.write_locks.entry(acc).or_insert(0) += count;
+ }
+ }
+
+ pub fn unlock_accounts(
+ &mut self,
+ read_locks: HashMap<Pubkey, u64>,
+ write_locks: HashMap<Pubkey, u64>,
+ ) {
+ for (acc, count) in read_locks {
+ if let Entry::Occupied(mut entry) = self.read_locks.entry(acc) {
+ let val = entry.get_mut();
+ *val = val.saturating_sub(count);
+ if entry.get() == &0 {
+ let _ = entry.remove();
+ }
+ } else {
+ warn!("error unlocking read-locked account, account: {:?}", acc);
+ }
+ }
+ for (acc, count) in write_locks {
+ if let Entry::Occupied(mut entry) = self.write_locks.entry(acc) {
+ let val = entry.get_mut();
+ *val = val.saturating_sub(count);
+ if entry.get() == &0 {
+ let _ = entry.remove();
+ }
+ } else {
+ warn!("error unlocking write-locked account, account: {:?}", acc);
+ }
+ }
+ }
+}
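+// Lock counts in `BundleAccountLocks` behave like reference counts: locking
+// {A: 2} and later unlocking {A: 1} leaves A held with a count of 1; only when
+// the count reaches 0 is the entry removed and the account visible as unlocked
+// to BankingStage.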
+
+#[derive(Clone, Default)]
+pub struct BundleAccountLocker {
+ account_locks: Arc<Mutex<BundleAccountLocks>>,
+}
+
+impl BundleAccountLocker {
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn read_locks(&self) -> HashSet<Pubkey> {
+ self.account_locks.lock().unwrap().read_locks()
+ }
+
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn write_locks(&self) -> HashSet<Pubkey> {
+ self.account_locks.lock().unwrap().write_locks()
+ }
+
+ /// used in BankingStage during TransactionBatch construction to ensure that BankingStage
+ /// doesn't lock anything currently locked in the BundleAccountLocker
+ pub fn account_locks(&self) -> MutexGuard<BundleAccountLocks> {
+ self.account_locks.lock().unwrap()
+ }
+
+ /// Prepares a locked bundle and returns a LockedBundle containing locked accounts.
+ /// When a LockedBundle is dropped, the accounts are automatically unlocked
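+ /// A minimal usage sketch (hypothetical `sanitized_bundle` and `bank` in scope;
+ /// `ignore`d since it needs a live bank):
+ /// ```ignore
+ /// let locker = BundleAccountLocker::default();
+ /// let locked = locker.prepare_locked_bundle(&sanitized_bundle, &bank).unwrap();
+ /// // bundle accounts stay reserved while `locked` is alive
+ /// drop(locked); // locks released here
+ /// ```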
+ pub fn prepare_locked_bundle<'a, 'b>(
+ &'a self,
+ sanitized_bundle: &'b SanitizedBundle,
+ bank: &Arc<Bank>,
+ ) -> BundleAccountLockerResult<LockedBundle<'a, 'b>> {
+ let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?;
+
+ self.account_locks
+ .lock()
+ .unwrap()
+ .lock_accounts(read_locks, write_locks);
+ Ok(LockedBundle::new(self, sanitized_bundle, bank))
+ }
+
+ /// Unlocks bundle accounts. Note that LockedBundle::drop will auto-drop the bundle account locks
+ fn unlock_bundle_accounts(
+ &self,
+ sanitized_bundle: &SanitizedBundle,
+ bank: &Bank,
+ ) -> BundleAccountLockerResult<()> {
+ let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?;
+
+ self.account_locks
+ .lock()
+ .unwrap()
+ .unlock_accounts(read_locks, write_locks);
+ Ok(())
+ }
+
+ /// Returns the read and write locks for this bundle
+ /// Each lock type contains a HashMap which maps Pubkey to number of locks held
+ fn get_read_write_locks(
+ bundle: &SanitizedBundle,
+ bank: &Bank,
+ ) -> BundleAccountLockerResult<(HashMap<Pubkey, u64>, HashMap<Pubkey, u64>)> {
+ let transaction_locks: Vec<TransactionAccountLocks> = bundle
+ .transactions
+ .iter()
+ .filter_map(|tx| {
+ tx.get_account_locks(bank.get_transaction_account_lock_limit())
+ .ok()
+ })
+ .collect();
+
+ if transaction_locks.len() != bundle.transactions.len() {
+ return Err(BundleAccountLockerError::LockingError);
+ }
+
+ let bundle_read_locks = transaction_locks
+ .iter()
+ .flat_map(|tx| tx.readonly.iter().map(|a| **a));
+ let bundle_read_locks =
+ bundle_read_locks
+ .into_iter()
+ .fold(HashMap::new(), |mut map, acc| {
+ *map.entry(acc).or_insert(0) += 1;
+ map
+ });
+
+ let bundle_write_locks = transaction_locks
+ .iter()
+ .flat_map(|tx| tx.writable.iter().map(|a| **a));
+ let bundle_write_locks =
+ bundle_write_locks
+ .into_iter()
+ .fold(HashMap::new(), |mut map, acc| {
+ *map.entry(acc).or_insert(0) += 1;
+ map
+ });
+
+ Ok((bundle_read_locks, bundle_write_locks))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ bundle_stage::bundle_account_locker::BundleAccountLocker,
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ packet_bundle::PacketBundle,
+ },
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{
+ packet::Packet, signature::Signer, signer::keypair::Keypair, system_program,
+ system_transaction::transfer, transaction::VersionedTransaction,
+ },
+ std::{collections::HashSet, sync::Arc},
+ };
+
+ #[test]
+ fn test_simple_lock_bundles() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(2);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let bundle_account_locker = BundleAccountLocker::default();
+
+ let kp0 = Keypair::new();
+ let kp1 = Keypair::new();
+
+ let tx0 = VersionedTransaction::from(transfer(
+ &mint_keypair,
+ &kp0.pubkey(),
+ 1,
+ genesis_config.hash(),
+ ));
+ let tx1 = VersionedTransaction::from(transfer(
+ &mint_keypair,
+ &kp1.pubkey(),
+ 1,
+ genesis_config.hash(),
+ ));
+
+ let mut packet_bundle0 = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, &tx0).unwrap()]),
+ bundle_id: tx0.signatures[0].to_string(),
+ };
+ let mut packet_bundle1 = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, &tx1).unwrap()]),
+ bundle_id: tx1.signatures[0].to_string(),
+ };
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+
+ let sanitized_bundle0 = ImmutableDeserializedBundle::new(&mut packet_bundle0, None)
+ .unwrap()
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .expect("sanitize bundle 0");
+ let sanitized_bundle1 = ImmutableDeserializedBundle::new(&mut packet_bundle1, None)
+ .unwrap()
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .expect("sanitize bundle 1");
+
+ let locked_bundle0 = bundle_account_locker
+ .prepare_locked_bundle(&sanitized_bundle0, &bank)
+ .unwrap();
+
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ let locked_bundle1 = bundle_account_locker
+ .prepare_locked_bundle(&sanitized_bundle1, &bank)
+ .unwrap();
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey(), kp1.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ drop(locked_bundle0);
+ assert_eq!(
+ bundle_account_locker.write_locks(),
+ HashSet::from_iter([mint_keypair.pubkey(), kp1.pubkey()])
+ );
+ assert_eq!(
+ bundle_account_locker.read_locks(),
+ HashSet::from_iter([system_program::id()])
+ );
+
+ drop(locked_bundle1);
+ assert!(bundle_account_locker.write_locks().is_empty());
+ assert!(bundle_account_locker.read_locks().is_empty());
+ }
+}
diff --git a/core/src/bundle_stage/bundle_consumer.rs b/core/src/bundle_stage/bundle_consumer.rs
new file mode 100644
index 00000000000000..beaf98bd5559fd
--- /dev/null
+++ b/core/src/bundle_stage/bundle_consumer.rs
@@ -0,0 +1,1593 @@
+use {
+ crate::{
+ banking_stage::{
+ committer::CommitTransactionDetails, leader_slot_metrics::ProcessTransactionsSummary,
+ leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, qos_service::QosService,
+ unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ },
+ bundle_stage::{
+ bundle_account_locker::{BundleAccountLocker, LockedBundle},
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics,
+ committer::Committer,
+ },
+ consensus_cache_updater::ConsensusCacheUpdater,
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ tip_manager::TipManager,
+ },
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_bundle::{
+ bundle_execution::{load_and_execute_bundle, BundleExecutionMetrics},
+ BundleExecutionError, BundleExecutionResult, TipError,
+ },
+ solana_cost_model::transaction_cost::TransactionCost,
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_measure::{measure, measure_us},
+ solana_poh::poh_recorder::{BankStart, RecordTransactionsSummary, TransactionRecorder},
+ solana_runtime::bank::Bank,
+ solana_sdk::{
+ bundle::SanitizedBundle,
+ clock::{Slot, MAX_PROCESSING_AGE},
+ feature_set,
+ pubkey::Pubkey,
+ transaction::{self},
+ },
+ std::{
+ collections::HashSet,
+ sync::{Arc, Mutex},
+ time::{Duration, Instant},
+ },
+};
+
+pub struct ExecuteRecordCommitResult {
+ commit_transaction_details: Vec<CommitTransactionDetails>,
+ result: BundleExecutionResult<()>,
+ execution_metrics: BundleExecutionMetrics,
+ execute_and_commit_timings: LeaderExecuteAndCommitTimings,
+ transaction_error_counter: TransactionErrorMetrics,
+}
+
+pub struct BundleConsumer {
+ committer: Committer,
+ transaction_recorder: TransactionRecorder,
+ qos_service: QosService,
+ log_messages_bytes_limit: Option<usize>,
+
+ consensus_cache_updater: ConsensusCacheUpdater,
+
+ tip_manager: TipManager,
+ last_tip_update_slot: Slot,
+
+ blacklisted_accounts: HashSet<Pubkey>,
+
+ // Manages account locks across multiple transactions within a bundle to prevent race conditions
+ // with BankingStage
+ bundle_account_locker: BundleAccountLocker,
+
+ block_builder_fee_info: Arc<Mutex<BlockBuilderFeeInfo>>,
+
+ max_bundle_retry_duration: Duration,
+
+ cluster_info: Arc<ClusterInfo>,
+
+ reserved_space: BundleReservedSpaceManager,
+}
+
+impl BundleConsumer {
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ committer: Committer,
+ transaction_recorder: TransactionRecorder,
+ qos_service: QosService,
+ log_messages_bytes_limit: Option<usize>,
+ tip_manager: TipManager,
+ bundle_account_locker: BundleAccountLocker,
+ block_builder_fee_info: Arc<Mutex<BlockBuilderFeeInfo>>,
+ max_bundle_retry_duration: Duration,
+ cluster_info: Arc<ClusterInfo>,
+ reserved_space: BundleReservedSpaceManager,
+ ) -> Self {
+ Self {
+ committer,
+ transaction_recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ consensus_cache_updater: ConsensusCacheUpdater::default(),
+ tip_manager,
+ // MAX because sending tips during slot 0 in tests doesn't work
+ last_tip_update_slot: u64::MAX,
+ blacklisted_accounts: HashSet::default(),
+ bundle_account_locker,
+ block_builder_fee_info,
+ max_bundle_retry_duration,
+ cluster_info,
+ reserved_space,
+ }
+ }
+
+ // A bundle is a series of transactions to be executed sequentially, atomically, and all-or-nothing.
+ // Sequentially:
+ // - Transactions are executed in order
+ // Atomically:
+ // - All transactions in a bundle get recorded to PoH and committed to the bank in the same slot. Account locks
+ // for all accounts in all transactions in a bundle are held during the entire execution to remove POH record race conditions
+ // with transactions in BankingStage.
+ // All-or-nothing:
+ // - All transactions are committed or none. Modified state for the entire bundle isn't recorded to PoH and committed to the
+ // bank until all transactions in the bundle have executed.
+ //
+ // Some corner cases to be aware of when working with BundleStage:
+ // A bundle is not allowed to call the Tip Payment program in a bundle (or BankingStage).
+ // - This is to avoid stealing of tips by malicious parties with bundles that crank the tip
+ // payment program and set the tip receiver to themselves.
+ // A bundle is not allowed to touch consensus-related accounts
+ // - This is to avoid stalling the voting BankingStage threads.
+ pub fn consume_buffered_bundles(
+ &mut self,
+ bank_start: &BankStart,
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ self.maybe_update_blacklist(bank_start);
+ self.reserved_space.tick(&bank_start.working_bank);
+
+ let reached_end_of_slot = unprocessed_transaction_storage.process_bundles(
+ bank_start.working_bank.clone(),
+ bundle_stage_leader_metrics,
+ &self.blacklisted_accounts,
+ |bundles, bundle_stage_leader_metrics| {
+ Self::do_process_bundles(
+ &self.bundle_account_locker,
+ &self.tip_manager,
+ &mut self.last_tip_update_slot,
+ &self.cluster_info,
+ &self.block_builder_fee_info,
+ &self.committer,
+ &self.transaction_recorder,
+ &self.qos_service,
+ &self.log_messages_bytes_limit,
+ self.max_bundle_retry_duration,
+ &self.reserved_space,
+ bundles,
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ },
+ );
+
+ if reached_end_of_slot {
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .set_end_of_slot_unprocessed_buffer_len(
+ unprocessed_transaction_storage.len() as u64
+ );
+ }
+ }
+
+ /// Blacklist is updated with the tip payment program + any consensus accounts.
+ fn maybe_update_blacklist(&mut self, bank_start: &BankStart) {
+ if self
+ .consensus_cache_updater
+ .maybe_update(&bank_start.working_bank)
+ {
+ self.blacklisted_accounts = self
+ .consensus_cache_updater
+ .consensus_accounts_cache()
+ .union(&HashSet::from_iter([self
+ .tip_manager
+ .tip_payment_program_id()]))
+ .cloned()
+ .collect();
+
+ debug!(
+ "updated blacklist with {} accounts",
+ self.blacklisted_accounts.len()
+ );
+ }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn do_process_bundles(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ last_tip_updated_slot: &mut Slot,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ bundles: &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Vec<Result<(), BundleExecutionError>> {
+ // BundleAccountLocker holds RW locks for ALL accounts in ALL transactions within a single bundle.
+ // By pre-locking bundles before they're ready to be processed, it will prevent BankingStage from
+ // grabbing those locks so BundleStage can process as fast as possible.
+ // A LockedBundle is similar to TransactionBatch; once it's dropped the locks are released.
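+ // Note: the collect() below (hence the needless_collect allow) is deliberate;
+ // it takes every bundle's locks up front, before any bundle executes, rather
+ // than lazily locking one bundle at a time.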
+ #[allow(clippy::needless_collect)]
+ let (locked_bundle_results, locked_bundles_elapsed) = measure!(
+ bundles
+ .iter()
+ .map(|(_, sanitized_bundle)| {
+ bundle_account_locker
+ .prepare_locked_bundle(sanitized_bundle, &bank_start.working_bank)
+ })
+ .collect::<Vec<_>>(),
+ "locked_bundles_elapsed"
+ );
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_locked_bundle_elapsed_us(locked_bundles_elapsed.as_us());
+
+ let (execution_results, execute_locked_bundles_elapsed) = measure!(locked_bundle_results
+ .into_iter()
+ .map(|r| match r {
+ Ok(locked_bundle) => {
+ let (r, measure) = measure_us!(Self::process_bundle(
+ bundle_account_locker,
+ tip_manager,
+ last_tip_updated_slot,
+ cluster_info,
+ block_builder_fee_info,
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ &locked_bundle,
+ bank_start,
+ bundle_stage_leader_metrics,
+ ));
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_packets_transactions_us(measure);
+ r
+ }
+ Err(_) => {
+ Err(BundleExecutionError::LockError)
+ }
+ })
+ .collect::<Vec<_>>());
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_execute_locked_bundles_elapsed_us(execute_locked_bundles_elapsed.as_us());
+ execution_results.iter().for_each(|result| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_bundle_execution_result(result);
+ });
+
+ execution_results
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn process_bundle(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ last_tip_updated_slot: &mut Slot,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ locked_bundle: &LockedBundle,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), BundleExecutionError> {
+ if !Bank::should_bank_still_be_processing_txs(
+ &bank_start.bank_creation_time,
+ bank_start.working_bank.ns_per_slot,
+ ) {
+ return Err(BundleExecutionError::BankProcessingTimeLimitReached);
+ }
+
+ if Self::bundle_touches_tip_pdas(
+ locked_bundle.sanitized_bundle(),
+ &tip_manager.get_tip_accounts(),
+ ) && bank_start.working_bank.slot() != *last_tip_updated_slot
+ {
+ let start = Instant::now();
+ let result = Self::handle_tip_programs(
+ bundle_account_locker,
+ tip_manager,
+ cluster_info,
+ block_builder_fee_info,
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ bank_start,
+ bundle_stage_leader_metrics,
+ );
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_change_tip_receiver_elapsed_us(start.elapsed().as_micros() as u64);
+
+ result?;
+
+ *last_tip_updated_slot = bank_start.working_bank.slot();
+ }
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )?;
+
+ Ok(())
+ }
+
+ /// The validator needs to manage state on two programs related to tips
+ #[allow(clippy::too_many_arguments)]
+ fn handle_tip_programs(
+ bundle_account_locker: &BundleAccountLocker,
+ tip_manager: &TipManager,
+ cluster_info: &Arc<ClusterInfo>,
+ block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), BundleExecutionError> {
+ debug!("handle_tip_programs");
+
+ // This will set up the tip payment and tip distribution programs if they haven't been
+ // initialized yet, which is typically helpful for local validators. On mainnet and testnet,
+ // this code should never run.
+ let keypair = cluster_info.keypair().clone();
+ let initialize_tip_programs_bundle =
+ tip_manager.get_initialize_tip_programs_bundle(&bank_start.working_bank, &keypair);
+ if let Some(bundle) = initialize_tip_programs_bundle {
+ debug!(
+ "initializing tip programs with {} transactions, bundle id: {}",
+ bundle.transactions.len(),
+ bundle.bundle_id
+ );
+
+ let locked_init_tip_programs_bundle = bundle_account_locker
+ .prepare_locked_bundle(&bundle, &bank_start.working_bank)
+ .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?;
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_init_tip_programs_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ .map_err(|e| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_init_tip_account_errors(1);
+ error!(
+ "bundle: {} error initializing tip programs: {:?}",
+ locked_init_tip_programs_bundle.sanitized_bundle().bundle_id,
+ e
+ );
+ BundleExecutionError::TipError(TipError::InitializeProgramsError)
+ })?;
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_init_tip_account_ok(1);
+ }
+
+ // There are two frequently run internal cranks inside the jito-solana validator that have to do with managing MEV tips.
+ // One is initializing the TipDistributionAccount, which is a validator's "tip piggy bank" for an epoch.
+ // The other is ensuring the tip_receiver is configured correctly so tips are routed to the correct
+ // address. The validator must drain the tip accounts to the previous tip receiver before setting the tip receiver to
+ // themselves.
+
+ let kp = cluster_info.keypair().clone();
+ let tip_crank_bundle = tip_manager.get_tip_programs_crank_bundle(
+ &bank_start.working_bank,
+ &kp,
+ &block_builder_fee_info.lock().unwrap(),
+ )?;
+ debug!("tip_crank_bundle is_some: {}", tip_crank_bundle.is_some());
+
+ if let Some(bundle) = tip_crank_bundle {
+ info!(
+ "bundle id: {} cranking tip programs with {} transactions",
+ bundle.bundle_id,
+ bundle.transactions.len()
+ );
+
+ let locked_tip_crank_bundle = bundle_account_locker
+ .prepare_locked_bundle(&bundle, &bank_start.working_bank)
+ .map_err(|_| BundleExecutionError::TipError(TipError::LockError))?;
+
+ Self::update_qos_and_execute_record_commit_bundle(
+ committer,
+ recorder,
+ qos_service,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ reserved_space,
+ locked_tip_crank_bundle.sanitized_bundle(),
+ bank_start,
+ bundle_stage_leader_metrics,
+ )
+ .map_err(|e| {
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_change_tip_receiver_errors(1);
+ error!(
+ "bundle: {} error cranking tip programs: {:?}",
+ locked_tip_crank_bundle.sanitized_bundle().bundle_id,
+ e
+ );
+ BundleExecutionError::TipError(TipError::CrankTipError)
+ })?;
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_change_tip_receiver_ok(1);
+ }
+
+ Ok(())
+ }
+
+ /// Reserves space for the entire bundle up-front to ensure the entire bundle can execute.
+ /// Rolls back the reserved space if there's not enough blockspace for all transactions in the bundle.
+ fn reserve_bundle_blockspace(
+ qos_service: &QosService,
+ reserved_space: &BundleReservedSpaceManager,
+ sanitized_bundle: &SanitizedBundle,
+ bank: &Arc,
+ ) -> BundleExecutionResult<(Vec<transaction::Result<TransactionCost>>, usize)> {
+ let mut write_cost_tracker = bank.write_cost_tracker().unwrap();
+
+ // set the block cost limit to the original block cost limit, run the select + accumulate
+ // then reset back to the expected block cost limit. this allows bundle stage to potentially
+ // increase block_compute_limits, allocate the space, and reset the block_cost_limits to
+ // the reserved space without BankingStage racing to allocate this extra reserved space
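+ // e.g. during the reserved window BankingStage sees a limit of
+ // MAX_BLOCK_UNITS - preallocated_bundle_cost; the limit is briefly raised to the
+ // full MAX_BLOCK_UNITS while this bundle's costs are accumulated, then restored.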
+ write_cost_tracker.set_block_cost_limit(reserved_space.block_cost_limit());
+ let (transaction_qos_cost_results, cost_model_throttled_transactions_count) = qos_service
+ .select_and_accumulate_transaction_costs(
+ bank,
+ &mut write_cost_tracker,
+ &sanitized_bundle.transactions,
+ std::iter::repeat(Ok(())),
+ );
+ write_cost_tracker.set_block_cost_limit(reserved_space.expected_block_cost_limits(bank));
+ drop(write_cost_tracker);
+
+ // rollback all transaction costs if the bundle can't fit
+ if transaction_qos_cost_results.iter().any(|c| c.is_err()) {
+ QosService::remove_costs(transaction_qos_cost_results.iter(), None, bank);
+ return Err(BundleExecutionError::ExceedsCostModel);
+ }
+
+ Ok((
+ transaction_qos_cost_results,
+ cost_model_throttled_transactions_count,
+ ))
+ }
+
+ fn update_qos_and_execute_record_commit_bundle(
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ qos_service: &QosService,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ reserved_space: &BundleReservedSpaceManager,
+ sanitized_bundle: &SanitizedBundle,
+ bank_start: &BankStart,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> BundleExecutionResult<()> {
+ debug!(
+ "bundle: {} reserving blockspace for {} transactions",
+ sanitized_bundle.bundle_id,
+ sanitized_bundle.transactions.len()
+ );
+
+ let (
+ (transaction_qos_cost_results, _cost_model_throttled_transactions_count),
+ cost_model_elapsed_us,
+ ) = measure_us!(Self::reserve_bundle_blockspace(
+ qos_service,
+ reserved_space,
+ sanitized_bundle,
+ &bank_start.working_bank
+ )?);
+
+ debug!(
+ "bundle: {} executing, recording, and committing",
+ sanitized_bundle.bundle_id
+ );
+
+ let (result, process_transactions_us) = measure_us!(Self::execute_record_commit_bundle(
+ committer,
+ recorder,
+ log_messages_bytes_limit,
+ max_bundle_retry_duration,
+ sanitized_bundle,
+ bank_start,
+ ));
+
+ bundle_stage_leader_metrics
+ .bundle_stage_metrics_tracker()
+ .increment_num_execution_retries(result.execution_metrics.num_retries);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_transaction_errors(&result.transaction_error_counter);
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_process_transactions_us(process_transactions_us);
+
+ let (cu, us) = result
+ .execute_and_commit_timings
+ .execute_timings
+ .accumulate_execute_units_and_time();
+ qos_service.accumulate_actual_execute_cu(cu);
+ qos_service.accumulate_actual_execute_time(us);
+
+ let num_committed = result
+ .commit_transaction_details
+ .iter()
+ .filter(|c| matches!(c, CommitTransactionDetails::Committed { .. }))
+ .count();
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_process_transactions_summary(&ProcessTransactionsSummary {
+ reached_max_poh_height: matches!(
+ result.result,
+ Err(BundleExecutionError::BankProcessingTimeLimitReached)
+ | Err(BundleExecutionError::PohRecordError(_))
+ ),
+ transactions_attempted_execution_count: sanitized_bundle.transactions.len(),
+ committed_transactions_count: num_committed,
+ // NOTE: this assumes that bundles are committed all-or-nothing
+ committed_transactions_with_successful_result_count: num_committed,
+ failed_commit_count: 0,
+ retryable_transaction_indexes: vec![],
+ cost_model_throttled_transactions_count: 0,
+ cost_model_us: cost_model_elapsed_us,
+ execute_and_commit_timings: result.execute_and_commit_timings,
+ error_counters: result.transaction_error_counter,
+ });
+
+ match result.result {
+ Ok(_) => {
+ // it's assumed that all transactions in the bundle executed, so QoS can be updated
+ if !bank_start
+ .working_bank
+ .feature_set
+ .is_active(&feature_set::apply_cost_tracker_during_replay::id())
+ {
+ QosService::update_costs(
+ transaction_qos_cost_results.iter(),
+ Some(&result.commit_transaction_details),
+ &bank_start.working_bank,
+ );
+ }
+
+ qos_service.report_metrics(bank_start.working_bank.slot());
+ Ok(())
+ }
+ Err(e) => {
+ // on bundle failure, none of the transactions are committed, so all
+ // reserved compute needs to be reverted
+ QosService::remove_costs(
+ transaction_qos_cost_results.iter(),
+ None,
+ &bank_start.working_bank,
+ );
+ qos_service.report_metrics(bank_start.working_bank.slot());
+
+ Err(e)
+ }
+ }
+ }
+
+ fn execute_record_commit_bundle(
+ committer: &Committer,
+ recorder: &TransactionRecorder,
+ log_messages_bytes_limit: &Option<usize>,
+ max_bundle_retry_duration: Duration,
+ sanitized_bundle: &SanitizedBundle,
+ bank_start: &BankStart,
+ ) -> ExecuteRecordCommitResult {
+ let transaction_status_sender_enabled = committer.transaction_status_sender_enabled();
+
+ let mut execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
+
+ debug!("bundle: {} executing", sanitized_bundle.bundle_id);
+ let default_accounts = vec![None; sanitized_bundle.transactions.len()];
+ let mut bundle_execution_results = load_and_execute_bundle(
+ &bank_start.working_bank,
+ sanitized_bundle,
+ MAX_PROCESSING_AGE,
+ &max_bundle_retry_duration,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ transaction_status_sender_enabled,
+ log_messages_bytes_limit,
+ false,
+ None,
+ &default_accounts,
+ &default_accounts,
+ );
+
+ let execution_metrics = bundle_execution_results.metrics();
+
+ execute_and_commit_timings.collect_balances_us = execution_metrics.collect_balances_us;
+ execute_and_commit_timings.load_execute_us = execution_metrics.load_execute_us;
+ execute_and_commit_timings
+ .execute_timings
+ .accumulate(&execution_metrics.execute_timings);
+
+ let mut transaction_error_counter = TransactionErrorMetrics::default();
+ bundle_execution_results
+ .bundle_transaction_results()
+ .iter()
+ .for_each(|r| {
+ transaction_error_counter
+ .accumulate(&r.load_and_execute_transactions_output().error_counters);
+ });
+
+ debug!(
+ "bundle: {} executed, is_ok: {}",
+ sanitized_bundle.bundle_id,
+ bundle_execution_results.result().is_ok()
+ );
+
+ // don't commit the bundle if any part of it failed to execute
+ if let Err(e) = bundle_execution_results.result() {
+ return ExecuteRecordCommitResult {
+ commit_transaction_details: vec![],
+ result: Err(e.clone().into()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ };
+ }
+
+ let (executed_batches, execution_results_to_transactions_us) =
+ measure_us!(bundle_execution_results.executed_transaction_batches());
+
+ debug!(
+ "bundle: {} recording {} batches of {:?} transactions",
+ sanitized_bundle.bundle_id,
+ executed_batches.len(),
+ executed_batches
+ .iter()
+ .map(|b| b.len())
+ .collect::<Vec<_>>()
+ );
+
+ let (freeze_lock, freeze_lock_us) = measure_us!(bank_start.working_bank.freeze_lock());
+ execute_and_commit_timings.freeze_lock_us = freeze_lock_us;
+
+ let (last_blockhash, lamports_per_signature) = bank_start
+ .working_bank
+ .last_blockhash_and_lamports_per_signature();
+
+ let (
+ RecordTransactionsSummary {
+ result: record_transactions_result,
+ record_transactions_timings,
+ starting_transaction_index,
+ },
+ record_us,
+ ) = measure_us!(
+ recorder.record_transactions(bank_start.working_bank.slot(), executed_batches)
+ );
+
+ execute_and_commit_timings.record_us = record_us;
+ execute_and_commit_timings.record_transactions_timings = record_transactions_timings;
+ execute_and_commit_timings
+ .record_transactions_timings
+ .execution_results_to_transactions_us = execution_results_to_transactions_us;
+
+ debug!(
+ "bundle: {} record result: {}",
+ sanitized_bundle.bundle_id,
+ record_transactions_result.is_ok()
+ );
+
+ // don't commit the bundle if recording failed
+ if let Err(e) = record_transactions_result {
+ return ExecuteRecordCommitResult {
+ commit_transaction_details: vec![],
+ result: Err(e.into()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ };
+ }
+
+ // note: execute_and_commit_timings.commit_us handled inside this function
+ let (commit_us, commit_bundle_details) = committer.commit_bundle(
+ &mut bundle_execution_results,
+ last_blockhash,
+ lamports_per_signature,
+ starting_transaction_index,
+ &bank_start.working_bank,
+ &mut execute_and_commit_timings,
+ );
+ execute_and_commit_timings.commit_us = commit_us;
+
+ drop(freeze_lock);
+
+        // commit_bundle_details contains transactions that were and were not committed.
+        // Given that the current implementation only executes, records, and commits bundles
+        // where all transactions executed, we can filter out the non-committed ones.
+        // TODO (LB): does this make more sense in commit_bundle for the future, when failing bundles are accepted?
+ let commit_transaction_details = commit_bundle_details
+ .commit_transaction_details
+ .into_iter()
+ .flat_map(|commit_details| {
+ commit_details
+ .into_iter()
+ .filter(|d| matches!(d, CommitTransactionDetails::Committed { .. }))
+ })
+ .collect();
+ debug!(
+ "bundle: {} commit details: {:?}",
+ sanitized_bundle.bundle_id, commit_transaction_details
+ );
+
+ ExecuteRecordCommitResult {
+ commit_transaction_details,
+ result: Ok(()),
+ execution_metrics,
+ execute_and_commit_timings,
+ transaction_error_counter,
+ }
+ }
+
+ /// Returns true if any of the transactions in a bundle mention one of the tip PDAs
+    fn bundle_touches_tip_pdas(bundle: &SanitizedBundle, tip_pdas: &HashSet<Pubkey>) -> bool {
+ bundle.transactions.iter().any(|tx| {
+ tx.message()
+ .account_keys()
+ .iter()
+ .any(|a| tip_pdas.contains(a))
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ bundle_stage::{
+ bundle_account_locker::BundleAccountLocker, bundle_consumer::BundleConsumer,
+ bundle_packet_deserializer::BundlePacketDeserializer,
+ bundle_reserved_space_manager::BundleReservedSpaceManager,
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics, committer::Committer,
+ QosService, UnprocessedTransactionStorage,
+ },
+ packet_bundle::PacketBundle,
+ proxy::block_engine_stage::BlockBuilderFeeInfo,
+ tip_manager::{TipDistributionAccountConfig, TipManager, TipManagerConfig},
+ },
+ crossbeam_channel::{unbounded, Receiver},
+ jito_tip_distribution::sdk::derive_tip_distribution_account_address,
+ rand::{thread_rng, RngCore},
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_cost_model::{block_cost_limits::MAX_BLOCK_UNITS, cost_model::CostModel},
+ solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
+ solana_ledger::{
+ blockstore::Blockstore, genesis_utils::create_genesis_config,
+ get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache,
+ },
+ solana_perf::packet::PacketBatch,
+ solana_poh::{
+ poh_recorder::{PohRecorder, Record, WorkingBankEntry},
+ poh_service::PohService,
+ },
+ solana_program_test::programs::spl_programs,
+ solana_runtime::{
+ bank::Bank,
+ genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo},
+ prioritization_fee_cache::PrioritizationFeeCache,
+ },
+ solana_sdk::{
+ bundle::{derive_bundle_id, SanitizedBundle},
+ clock::MAX_PROCESSING_AGE,
+ feature_set::delay_visibility_of_program_deployment,
+ fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE},
+ genesis_config::ClusterType,
+ hash::Hash,
+ native_token::sol_to_lamports,
+ packet::Packet,
+ poh_config::PohConfig,
+ pubkey::Pubkey,
+ rent::Rent,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::{SanitizedTransaction, TransactionError, VersionedTransaction},
+ vote::state::VoteState,
+ },
+ solana_streamer::socket::SocketAddrSpace,
+ std::{
+ collections::{HashSet, VecDeque},
+ str::FromStr,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc, Mutex, RwLock,
+ },
+ thread::{Builder, JoinHandle},
+ time::Duration,
+ },
+ };
+
+ struct TestFixture {
+ genesis_config_info: GenesisConfigInfo,
+ leader_keypair: Keypair,
+        bank: Arc<Bank>,
+        exit: Arc<AtomicBool>,
+        poh_recorder: Arc<RwLock<PohRecorder>>,
+        poh_simulator: JoinHandle<()>,
+        entry_receiver: Receiver<WorkingBankEntry>,
+ }
+
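+    /// Spawns a thread that services PoH `Record` requests until `is_exited` is set,
+    /// standing in for PohService so recorded transactions reach the entry receiver.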
+ pub(crate) fn simulate_poh(
+        record_receiver: Receiver<Record>,
+        poh_recorder: &Arc<RwLock<PohRecorder>>,
+ ) -> JoinHandle<()> {
+ let poh_recorder = poh_recorder.clone();
+ let is_exited = poh_recorder.read().unwrap().is_exited.clone();
+ let tick_producer = Builder::new()
+ .name("solana-simulate_poh".to_string())
+ .spawn(move || loop {
+ PohService::read_record_receiver_and_process(
+ &poh_recorder,
+ &record_receiver,
+ Duration::from_millis(10),
+ );
+ if is_exited.load(Ordering::Relaxed) {
+ break;
+ }
+ });
+ tick_producer.unwrap()
+ }
+
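+    /// Creates a PohRecorder with an active bank and spawns a PoH-simulating thread over
+    /// its record channel, returning the exit flag, recorder, simulator handle, and entry receiver.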
+ pub fn create_test_recorder(
+        bank: &Arc<Bank>,
+        blockstore: Arc<Blockstore>,
+        poh_config: Option<PohConfig>,
+        leader_schedule_cache: Option<Arc<LeaderScheduleCache>>,
+    ) -> (
+        Arc<AtomicBool>,
+        Arc<RwLock<PohRecorder>>,
+        JoinHandle<()>,
+        Receiver<WorkingBankEntry>,
+    ) {
+ let leader_schedule_cache = match leader_schedule_cache {
+ Some(provided_cache) => provided_cache,
+ None => Arc::new(LeaderScheduleCache::new_from_bank(bank)),
+ };
+ let exit = Arc::new(AtomicBool::new(false));
+ let poh_config = poh_config.unwrap_or_default();
+ let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
+ bank.tick_height(),
+ bank.last_blockhash(),
+ bank.clone(),
+ Some((4, 4)),
+ bank.ticks_per_slot(),
+ &Pubkey::default(),
+ blockstore,
+ &leader_schedule_cache,
+ &poh_config,
+ exit.clone(),
+ );
+ poh_recorder.set_bank(bank.clone(), false);
+
+ let poh_recorder = Arc::new(RwLock::new(poh_recorder));
+ let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
+
+ (exit, poh_recorder, poh_simulator, entry_receiver)
+ }
+
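+    /// Builds a single-node test environment: a genesis config funded with `mint_sol` SOL,
+    /// a bank that can use SPL programs in the genesis slot, and a running PoH simulator.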
+ fn create_test_fixture(mint_sol: u64) -> TestFixture {
+ let mint_keypair = Keypair::new();
+ let leader_keypair = Keypair::new();
+ let voting_keypair = Keypair::new();
+
+ let rent = Rent::default();
+
+ let mut genesis_config = create_genesis_config_with_leader_ex(
+ sol_to_lamports(mint_sol as f64),
+ &mint_keypair.pubkey(),
+ &leader_keypair.pubkey(),
+ &voting_keypair.pubkey(),
+ &solana_sdk::pubkey::new_rand(),
+ rent.minimum_balance(VoteState::size_of()) + sol_to_lamports(1_000_000.0),
+ sol_to_lamports(1_000_000.0),
+ FeeRateGovernor {
+ // Initialize with a non-zero fee
+ lamports_per_signature: DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE / 2,
+ ..FeeRateGovernor::default()
+ },
+ rent, // most tests don't expect rent
+ ClusterType::Development,
+ spl_programs(&rent),
+ );
+ genesis_config.ticks_per_slot *= 8;
+
+        // workaround for https://github.com/solana-labs/solana/issues/30085
+        // the test can deploy and use spl_programs in the genesis slot without waiting for the next one
+ let mut bank = Bank::new_for_tests(&genesis_config);
+ bank.deactivate_feature(&delay_visibility_of_program_deployment::id());
+ let bank = Arc::new(bank);
+
+ let ledger_path = get_tmp_ledger_path_auto_delete!();
+ let blockstore = Arc::new(
+ Blockstore::open(ledger_path.path())
+ .expect("Expected to be able to open database ledger"),
+ );
+
+ let (exit, poh_recorder, poh_simulator, entry_receiver) =
+ create_test_recorder(&bank, blockstore, Some(PohConfig::default()), None);
+
+ let validator_pubkey = voting_keypair.pubkey();
+ TestFixture {
+ genesis_config_info: GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ voting_keypair,
+ validator_pubkey,
+ },
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ }
+ }
+
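+    /// Makes `num_bundles` bundles of `num_packets_per_bundle` random transfers each. All
+    /// transfers send from the mint keypair back to itself, so every bundle contends for the
+    /// same account locks.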
+ fn make_random_overlapping_bundles(
+ mint_keypair: &Keypair,
+ num_bundles: usize,
+ num_packets_per_bundle: usize,
+ hash: Hash,
+ max_transfer_amount: u64,
+    ) -> Vec<PacketBundle> {
+ let mut rng = thread_rng();
+
+ (0..num_bundles)
+ .map(|_| {
+ let transfers: Vec<_> = (0..num_packets_per_bundle)
+ .map(|_| {
+ VersionedTransaction::from(transfer(
+ mint_keypair,
+ &mint_keypair.pubkey(),
+ rng.next_u64() % max_transfer_amount,
+ hash,
+ ))
+ })
+ .collect();
+ let bundle_id = derive_bundle_id(&transfers);
+
+ PacketBundle {
+ batch: PacketBatch::new(
+ transfers
+ .iter()
+ .map(|tx| Packet::from_data(None, tx).unwrap())
+ .collect(),
+ ),
+ bundle_id,
+ }
+ })
+ .collect()
+ }
+
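+    /// Builds a TipManager pointed at fixed tip payment/distribution program IDs, with the
+    /// given vote account receiving tip distributions at a 10 bps commission.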
+ fn get_tip_manager(vote_account: &Pubkey) -> TipManager {
+ TipManager::new(TipManagerConfig {
+ tip_payment_program_id: Pubkey::from_str("T1pyyaTNZsKv2WcRAB8oVnk93mLJw2XzjtVYqCsaHqt")
+ .unwrap(),
+ tip_distribution_program_id: Pubkey::from_str(
+ "4R3gSG8BpU4t19KYj8CfnbtRpnT8gtk4dvTHxVRwc2r7",
+ )
+ .unwrap(),
+ tip_distribution_account_config: TipDistributionAccountConfig {
+ merkle_root_upload_authority: Pubkey::new_unique(),
+ vote_account: *vote_account,
+ commission_bps: 10,
+ },
+ })
+ }
+
+ /// Happy-path bundle execution w/ no tip management
+ #[test]
+ fn test_bundle_no_tip_success() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let status = poh_recorder.read().unwrap().reached_leader_slot();
+ info!("status: {:?}", status);
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let mut consumer = BundleConsumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ tip_manager,
+ BundleAccountLocker::default(),
+ block_builder_info,
+ Duration::from_secs(10),
+ cluster_info,
+ BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ 3_000_000,
+ poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10),
+ ),
+ );
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+
+ let mut packet_bundles = make_random_overlapping_bundles(
+ &genesis_config_info.mint_keypair,
+ 1,
+ 3,
+ genesis_config_info.genesis_config.hash(),
+ 10_000,
+ );
+ let deserialized_bundle = BundlePacketDeserializer::deserialize_bundle(
+ packet_bundles.get_mut(0).unwrap(),
+ false,
+ None,
+ )
+ .unwrap();
+ let mut error_metrics = TransactionErrorMetrics::default();
+ let sanitized_bundle = deserialized_bundle
+ .build_sanitized_bundle(
+ &bank_start.working_bank,
+ &HashSet::default(),
+ &mut error_metrics,
+ )
+ .unwrap();
+
+ let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]);
+ assert_eq!(
+ summary.num_packets_inserted,
+ sanitized_bundle.transactions.len()
+ );
+ assert_eq!(summary.num_bundles_dropped, 0);
+ assert_eq!(summary.num_bundles_inserted, 1);
+
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ &mut bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ );
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ for (entry, _) in entries_ticks {
+ if !entry.transactions.is_empty() {
+ // transactions in this test are all overlapping, so each entry will contain 1 transaction
+ assert_eq!(entry.transactions.len(), 1);
+ transactions.extend(entry.transactions);
+ }
+ }
+ if transactions.len() == sanitized_bundle.transactions.len() {
+ break;
+ }
+ }
+
+ let bundle_versioned_transactions: Vec<_> = sanitized_bundle
+ .transactions
+ .iter()
+ .map(|tx| tx.to_versioned_transaction())
+ .collect();
+ assert_eq!(transactions, bundle_versioned_transactions);
+
+ let check_results = bank.check_transactions(
+ &sanitized_bundle.transactions,
+ &vec![Ok(()); sanitized_bundle.transactions.len()],
+ MAX_PROCESSING_AGE,
+ &mut error_metrics,
+ );
+ assert_eq!(
+ check_results,
+ vec![
+ (Err(TransactionError::AlreadyProcessed), None);
+ sanitized_bundle.transactions.len()
+ ]
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ // TODO (LB): cleanup blockstore
+ }
+
+ /// Happy-path bundle execution to ensure tip management works.
+ /// Tip management involves cranking setup bundles before executing the test bundle
+ #[test]
+ fn test_bundle_tip_program_setup_success() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let mut consumer = BundleConsumer::new(
+ committer,
+ recorder,
+ QosService::new(1),
+ None,
+ tip_manager.clone(),
+ BundleAccountLocker::default(),
+ block_builder_info,
+ Duration::from_secs(10),
+ cluster_info.clone(),
+ BundleReservedSpaceManager::new(
+ MAX_BLOCK_UNITS,
+ 3_000_000,
+ poh_recorder
+ .read()
+ .unwrap()
+ .ticks_per_slot()
+ .saturating_mul(8)
+ .saturating_div(10),
+ ),
+ );
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let mut bundle_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+ // MAIN LOGIC
+
+ // a bundle that tips the tip program
+ let tip_accounts = tip_manager.get_tip_accounts();
+        let tip_account = tip_accounts.iter().collect::<Vec<_>>()[0];
+ let mut packet_bundle = PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(
+ None,
+ transfer(
+ &genesis_config_info.mint_keypair,
+ tip_account,
+ 1,
+ genesis_config_info.genesis_config.hash(),
+ ),
+ )
+ .unwrap()]),
+ bundle_id: "test_transfer".to_string(),
+ };
+
+ let deserialized_bundle =
+ BundlePacketDeserializer::deserialize_bundle(&mut packet_bundle, false, None).unwrap();
+ let mut error_metrics = TransactionErrorMetrics::default();
+ let sanitized_bundle = deserialized_bundle
+ .build_sanitized_bundle(
+ &bank_start.working_bank,
+ &HashSet::default(),
+ &mut error_metrics,
+ )
+ .unwrap();
+
+ let summary = bundle_storage.insert_bundles(vec![deserialized_bundle]);
+ assert_eq!(summary.num_bundles_inserted, 1);
+ assert_eq!(summary.num_packets_inserted, 1);
+ assert_eq!(summary.num_bundles_dropped, 0);
+
+ consumer.consume_buffered_bundles(
+ &bank_start,
+ &mut bundle_storage,
+ &mut bundle_stage_leader_metrics,
+ );
+
+        // it's expected there are 5 transactions: the four tip-management transactions (initialize
+        // the two tip programs, create the tip distribution account, change the tip receiver +
+        // block builder) followed by the transfer that tips the tip account
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions));
+ if transactions.len() == 5 {
+ break;
+ }
+ }
+
+ // tip management on the first bundle involves:
+ // calling initialize on the tip payment and tip distribution programs
+ // creating the tip distribution account for this validator's epoch (the MEV piggy bank)
+ // changing the tip receiver and block builder tx
+ // the original transfer that was sent
+ let keypair = cluster_info.keypair().clone();
+
+ assert_eq!(
+ transactions[0],
+ tip_manager
+ .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[1],
+ tip_manager
+ .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[2],
+ tip_manager
+ .initialize_tip_distribution_account_tx(
+ bank.last_blockhash(),
+ bank.epoch(),
+ &keypair
+ )
+ .to_versioned_transaction()
+ );
+ // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the
+ // TipPayment program during initialization
+ assert_eq!(
+ transactions[3],
+ tip_manager
+ .build_change_tip_receiver_and_block_builder_tx(
+ &keypair.pubkey(),
+ &derive_tip_distribution_account_address(
+ &tip_manager.tip_distribution_program_id(),
+ &genesis_config_info.validator_pubkey,
+ bank_start.working_bank.epoch()
+ )
+ .0,
+ &bank_start.working_bank,
+ &keypair,
+ &keypair.pubkey(),
+ &block_builder_pubkey,
+ 10
+ )
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[4],
+ sanitized_bundle.transactions[0].to_versioned_transaction()
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ }
+
+ #[test]
+ fn test_handle_tip_programs() {
+ solana_logger::setup();
+ let TestFixture {
+ genesis_config_info,
+ leader_keypair,
+ bank,
+ exit,
+ poh_recorder,
+ poh_simulator,
+ entry_receiver,
+ } = create_test_fixture(1_000_000);
+ let recorder = poh_recorder.read().unwrap().new_recorder();
+
+ let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+ let committer = Committer::new(
+ None,
+ replay_vote_sender,
+ Arc::new(PrioritizationFeeCache::new(0u64)),
+ );
+
+ let block_builder_pubkey = Pubkey::new_unique();
+ let tip_manager = get_tip_manager(&genesis_config_info.voting_keypair.pubkey());
+ let block_builder_info = Arc::new(Mutex::new(BlockBuilderFeeInfo {
+ block_builder: block_builder_pubkey,
+ block_builder_commission: 10,
+ }));
+
+ let cluster_info = Arc::new(ClusterInfo::new(
+ ContactInfo::new(leader_keypair.pubkey(), 0, 0),
+ Arc::new(leader_keypair),
+ SocketAddrSpace::new(true),
+ ));
+
+ let bank_start = poh_recorder.read().unwrap().bank_start().unwrap();
+
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+        // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+        // The last 20% has full compute, so blockspace is maximized if BundleStage is idle.
+ let reserved_space =
+ BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks);
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(1);
+ assert_matches!(
+ BundleConsumer::handle_tip_programs(
+ &BundleAccountLocker::default(),
+ &tip_manager,
+ &cluster_info,
+ &block_builder_info,
+ &committer,
+ &recorder,
+ &QosService::new(1),
+ &None,
+ Duration::from_secs(10),
+ &reserved_space,
+ &bank_start,
+ &mut bundle_stage_leader_metrics
+ ),
+ Ok(())
+ );
+
+ let mut transactions = Vec::new();
+ while let Ok(WorkingBankEntry {
+ bank: wbe_bank,
+ entries_ticks,
+ }) = entry_receiver.recv()
+ {
+ assert_eq!(bank.slot(), wbe_bank.slot());
+ transactions.extend(entries_ticks.into_iter().flat_map(|(e, _)| e.transactions));
+ if transactions.len() == 4 {
+ break;
+ }
+ }
+
+ let keypair = cluster_info.keypair().clone();
+ // expect to see initialize tip payment program, tip distribution program, initialize tip distribution account, change tip receiver + change block builder
+ assert_eq!(
+ transactions[0],
+ tip_manager
+ .initialize_tip_payment_program_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[1],
+ tip_manager
+ .initialize_tip_distribution_config_tx(bank.last_blockhash(), &keypair)
+ .to_versioned_transaction()
+ );
+ assert_eq!(
+ transactions[2],
+ tip_manager
+ .initialize_tip_distribution_account_tx(
+ bank.last_blockhash(),
+ bank.epoch(),
+ &keypair
+ )
+ .to_versioned_transaction()
+ );
+ // the first tip receiver + block builder are the initializer (keypair.pubkey()) as set by the
+ // TipPayment program during initialization
+ assert_eq!(
+ transactions[3],
+ tip_manager
+ .build_change_tip_receiver_and_block_builder_tx(
+ &keypair.pubkey(),
+ &derive_tip_distribution_account_address(
+ &tip_manager.tip_distribution_program_id(),
+ &genesis_config_info.validator_pubkey,
+ bank_start.working_bank.epoch()
+ )
+ .0,
+ &bank_start.working_bank,
+ &keypair,
+ &keypair.pubkey(),
+ &block_builder_pubkey,
+ 10
+ )
+ .to_versioned_transaction()
+ );
+
+ poh_recorder
+ .write()
+ .unwrap()
+ .is_exited
+ .store(true, Ordering::Relaxed);
+ exit.store(true, Ordering::Relaxed);
+ poh_simulator.join().unwrap();
+ }
+
+ #[test]
+ fn test_reserve_bundle_blockspace_success() {
+ let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+
+ let keypair1 = Keypair::new();
+ let keypair2 = Keypair::new();
+ let transfer_tx = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 1,
+ bank.parent_hash(),
+ ));
+ let sanitized_bundle = SanitizedBundle {
+ transactions: vec![transfer_tx],
+ bundle_id: String::default(),
+ };
+
+ let transfer_cost =
+ CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set);
+
+ let qos_service = QosService::new(1);
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+        // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+        // The last 20% has full compute, so blockspace is maximized if BundleStage is idle.
+ let reserved_space =
+ BundleReservedSpaceManager::new(MAX_BLOCK_UNITS, 3_000_000, reserved_ticks);
+
+ assert!(BundleConsumer::reserve_bundle_blockspace(
+ &qos_service,
+ &reserved_space,
+ &sanitized_bundle,
+ &bank
+ )
+ .is_ok());
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost(),
+ transfer_cost.sum()
+ );
+ }
+
+ #[test]
+ fn test_reserve_bundle_blockspace_failure() {
+ let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+
+ let keypair1 = Keypair::new();
+ let keypair2 = Keypair::new();
+ let transfer_tx1 = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 1,
+ bank.parent_hash(),
+ ));
+ let transfer_tx2 = SanitizedTransaction::from_transaction_for_tests(transfer(
+ &keypair1,
+ &keypair2.pubkey(),
+ 2,
+ bank.parent_hash(),
+ ));
+ let sanitized_bundle = SanitizedBundle {
+ transactions: vec![transfer_tx1, transfer_tx2],
+ bundle_id: String::default(),
+ };
+
+ // set block cost limit to 1 transfer transaction, try to process 2, should return an error
+ // and rollback block cost added
+ let transfer_cost =
+ CostModel::calculate_cost(&sanitized_bundle.transactions[0], &bank.feature_set);
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(transfer_cost.sum());
+
+ let qos_service = QosService::new(1);
+ let reserved_ticks = bank.max_tick_height().saturating_mul(8).saturating_div(10);
+
+        // The first 80% of the block, based on poh ticks, has `preallocated_bundle_cost` less compute units.
+        // The last 20% has full compute, so blockspace is maximized if BundleStage is idle.
+ let reserved_space = BundleReservedSpaceManager::new(
+ bank.read_cost_tracker().unwrap().block_cost(),
+ 50,
+ reserved_ticks,
+ );
+
+ assert!(BundleConsumer::reserve_bundle_blockspace(
+ &qos_service,
+ &reserved_space,
+ &sanitized_bundle,
+ &bank
+ )
+ .is_err());
+ assert_eq!(bank.read_cost_tracker().unwrap().block_cost(), 0);
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ bank.read_cost_tracker()
+ .unwrap()
+ .block_cost_limit()
+ .saturating_sub(50)
+ );
+ }
+}
diff --git a/core/src/bundle_stage/bundle_packet_deserializer.rs b/core/src/bundle_stage/bundle_packet_deserializer.rs
new file mode 100644
index 00000000000000..02fc9ff6a1183f
--- /dev/null
+++ b/core/src/bundle_stage/bundle_packet_deserializer.rs
@@ -0,0 +1,284 @@
+//! Deserializes PacketBundles
+use {
+ crate::{
+ immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle},
+ packet_bundle::PacketBundle,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_runtime::bank_forks::BankForks,
+ solana_sdk::saturating_add_assign,
+ std::{
+ sync::{Arc, RwLock},
+ time::{Duration, Instant},
+ },
+};
+
+/// Results from deserializing packet batches.
+#[derive(Debug)]
+pub struct ReceiveBundleResults {
+ /// Deserialized bundles from all received bundle packets
+    pub deserialized_bundles: Vec<ImmutableDeserializedBundle>,
+ /// Number of dropped bundles
+ pub num_dropped_bundles: usize,
+ /// Number of dropped packets
+ pub num_dropped_packets: usize,
+}
+
+pub struct BundlePacketDeserializer {
+ /// Receiver for bundle packets
+    bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+ /// Provides working bank for deserializer to check feature activation
+    bank_forks: Arc<RwLock<BankForks>>,
+ /// Max packets per bundle
+    max_packets_per_bundle: Option<usize>,
+}
+
+impl BundlePacketDeserializer {
+ pub fn new(
+        bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+        bank_forks: Arc<RwLock<BankForks>>,
+        max_packets_per_bundle: Option<usize>,
+ ) -> Self {
+ Self {
+ bundle_packet_receiver,
+ bank_forks,
+ max_packets_per_bundle,
+ }
+ }
+
+ /// Handles receiving bundles and deserializing them
+ pub fn receive_bundles(
+ &self,
+ recv_timeout: Duration,
+ capacity: usize,
+    ) -> Result<ReceiveBundleResults, RecvTimeoutError> {
+ let (bundle_count, _packet_count, mut bundles) =
+ self.receive_until(recv_timeout, capacity)?;
+
+ // Note: this can be removed after feature `round_compute_unit_price` is activated in
+ // mainnet-beta
+ let _working_bank = self.bank_forks.read().unwrap().working_bank();
+ let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set
+
+ Ok(Self::deserialize_and_collect_bundles(
+ bundle_count,
+ &mut bundles,
+ round_compute_unit_price_enabled,
+ self.max_packets_per_bundle,
+ ))
+ }
+
+    /// Deserializes bundle packets and collects them into ReceiveBundleResults, counting
+    /// dropped bundles and dropped packets along the way
+ fn deserialize_and_collect_bundles(
+ bundle_count: usize,
+ bundles: &mut [PacketBundle],
+ round_compute_unit_price_enabled: bool,
+        max_packets_per_bundle: Option<usize>,
+ ) -> ReceiveBundleResults {
+ let mut deserialized_bundles = Vec::with_capacity(bundle_count);
+ let mut num_dropped_bundles: usize = 0;
+ let mut num_dropped_packets: usize = 0;
+
+ for bundle in bundles.iter_mut() {
+ match Self::deserialize_bundle(
+ bundle,
+ round_compute_unit_price_enabled,
+ max_packets_per_bundle,
+ ) {
+ Ok(deserialized_bundle) => {
+ deserialized_bundles.push(deserialized_bundle);
+ }
+ Err(_) => {
+ // TODO (LB): prob wanna collect stats here
+ saturating_add_assign!(num_dropped_bundles, 1);
+ saturating_add_assign!(num_dropped_packets, bundle.batch.len());
+ }
+ }
+ }
+
+ ReceiveBundleResults {
+ deserialized_bundles,
+ num_dropped_bundles,
+ num_dropped_packets,
+ }
+ }
+
+    /// Receives bundle packets, greedily draining the channel until `recv_timeout` elapses
+    /// or `bundle_count_upperbound` bundles have been received
+ fn receive_until(
+ &self,
+ recv_timeout: Duration,
+ bundle_count_upperbound: usize,
+    ) -> Result<(usize, usize, Vec<PacketBundle>), RecvTimeoutError> {
+ let start = Instant::now();
+
+ let mut bundles = self.bundle_packet_receiver.recv_timeout(recv_timeout)?;
+ let mut num_packets_received: usize = bundles.iter().map(|pb| pb.batch.len()).sum();
+ let mut num_bundles_received: usize = bundles.len();
+
+ if num_bundles_received <= bundle_count_upperbound {
+ while let Ok(bundle_packets) = self.bundle_packet_receiver.try_recv() {
+ trace!("got more packet batches in bundle packet deserializer");
+
+ saturating_add_assign!(
+ num_packets_received,
+ bundle_packets
+ .iter()
+ .map(|pb| pb.batch.len())
+                    .sum::<usize>()
+ );
+ saturating_add_assign!(num_bundles_received, bundle_packets.len());
+
+ bundles.extend(bundle_packets);
+
+ if start.elapsed() >= recv_timeout
+ || num_bundles_received >= bundle_count_upperbound
+ {
+ break;
+ }
+ }
+ }
+
+ Ok((num_bundles_received, num_packets_received, bundles))
+ }
+
+    /// Deserializes a PacketBundle into an ImmutableDeserializedBundle, returning an error if
+    /// any packet in the bundle fails to deserialize
+ pub fn deserialize_bundle(
+ bundle: &mut PacketBundle,
+ round_compute_unit_price_enabled: bool,
+        max_packets_per_bundle: Option<usize>,
+    ) -> Result<ImmutableDeserializedBundle, DeserializedBundleError> {
+ bundle.batch.iter_mut().for_each(|p| {
+ p.meta_mut()
+ .set_round_compute_unit_price(round_compute_unit_price_enabled);
+ });
+
+ ImmutableDeserializedBundle::new(bundle, max_packets_per_bundle)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ crossbeam_channel::unbounded,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{packet::Packet, signature::Signer, system_transaction::transfer},
+ };
+
+ #[test]
+ fn test_deserialize_and_collect_bundles_empty() {
+ let results =
+ BundlePacketDeserializer::deserialize_and_collect_bundles(0, &mut [], false, Some(5));
+ assert_eq!(results.deserialized_bundles.len(), 0);
+ assert_eq!(results.num_dropped_packets, 0);
+ assert_eq!(results.num_dropped_bundles, 0);
+ }
+
+ #[test]
+ fn test_receive_bundles_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let (sender, receiver) = unbounded();
+
+ let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10));
+
+ let packet_bundles: Vec<_> = (0..10)
+ .map(|_| PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(
+ None,
+ transfer(
+ &mint_keypair,
+ &mint_keypair.pubkey(),
+ 100,
+ genesis_config.hash(),
+ ),
+ )
+ .unwrap()]),
+ bundle_id: String::default(),
+ })
+ .collect();
+
+ sender.send(packet_bundles.clone()).unwrap();
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+        // capacity is 5, but all 10 bundles arrive in a single batch, so all 10 are returned
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ // make sure empty
+ assert_matches!(
+ deserializer.receive_bundles(Duration::from_millis(100), 5),
+ Err(RecvTimeoutError::Timeout)
+ );
+
+        // send 2x 10-bundle batches. capacity is 5, but each receive returns 10 since that's the batch size
+ sender.send(packet_bundles.clone()).unwrap();
+ sender.send(packet_bundles).unwrap();
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+ assert_eq!(bundles.deserialized_bundles.len(), 10);
+ assert_eq!(bundles.num_dropped_bundles, 0);
+ assert_eq!(bundles.num_dropped_packets, 0);
+
+ assert_matches!(
+ deserializer.receive_bundles(Duration::from_millis(100), 5),
+ Err(RecvTimeoutError::Timeout)
+ );
+ }
+
+ #[test]
+ fn test_receive_bundles_bad_bundles() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair: _,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let (sender, receiver) = unbounded();
+
+ let deserializer = BundlePacketDeserializer::new(receiver, bank_forks, Some(10));
+
+ let packet_bundles: Vec<_> = (0..10)
+ .map(|_| PacketBundle {
+ batch: PacketBatch::new(vec![]),
+ bundle_id: String::default(),
+ })
+ .collect();
+ sender.send(packet_bundles).unwrap();
+
+ let bundles = deserializer
+ .receive_bundles(Duration::from_millis(100), 5)
+ .unwrap();
+        // the empty bundles arrive as a single batch; all 10 fail deserialization and are dropped
+ assert_eq!(bundles.deserialized_bundles.len(), 0);
+ assert_eq!(bundles.num_dropped_bundles, 10);
+ assert_eq!(bundles.num_dropped_packets, 0);
+ }
+}
diff --git a/core/src/bundle_stage/bundle_packet_receiver.rs b/core/src/bundle_stage/bundle_packet_receiver.rs
new file mode 100644
index 00000000000000..33bc2d935a3744
--- /dev/null
+++ b/core/src/bundle_stage/bundle_packet_receiver.rs
@@ -0,0 +1,836 @@
+use {
+ super::BundleStageLoopMetrics,
+ crate::{
+ banking_stage::unprocessed_transaction_storage::UnprocessedTransactionStorage,
+ bundle_stage::{
+ bundle_packet_deserializer::{BundlePacketDeserializer, ReceiveBundleResults},
+ bundle_stage_leader_metrics::BundleStageLeaderMetrics,
+ },
+ immutable_deserialized_bundle::ImmutableDeserializedBundle,
+ packet_bundle::PacketBundle,
+ },
+ crossbeam_channel::{Receiver, RecvTimeoutError},
+ solana_measure::{measure::Measure, measure_us},
+ solana_runtime::bank_forks::BankForks,
+ solana_sdk::timing::timestamp,
+ std::{
+ sync::{Arc, RwLock},
+ time::Duration,
+ },
+};
+
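+/// Receives packet bundles from the channel, deserializes them, and buffers them into
+/// UnprocessedTransactionStorage, accumulating bundle stage metrics along the way.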
+pub struct BundleReceiver {
+ id: u32,
+ bundle_packet_deserializer: BundlePacketDeserializer,
+}
+
+impl BundleReceiver {
+ pub fn new(
+ id: u32,
+        bundle_packet_receiver: Receiver<Vec<PacketBundle>>,
+        bank_forks: Arc<RwLock<BankForks>>,
+        max_packets_per_bundle: Option<usize>,
+ ) -> Self {
+ Self {
+ id,
+ bundle_packet_deserializer: BundlePacketDeserializer::new(
+ bundle_packet_receiver,
+ bank_forks,
+ max_packets_per_bundle,
+ ),
+ }
+ }
+
+ /// Receive incoming packets, push into unprocessed buffer with packet indexes
+ pub fn receive_and_buffer_bundles(
+ &mut self,
+ unprocessed_bundle_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_metrics: &mut BundleStageLoopMetrics,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) -> Result<(), RecvTimeoutError> {
+ let (result, recv_time_us) = measure_us!({
+ let recv_timeout = Self::get_receive_timeout(unprocessed_bundle_storage);
+ let mut recv_and_buffer_measure = Measure::start("recv_and_buffer");
+ self.bundle_packet_deserializer
+ .receive_bundles(recv_timeout, unprocessed_bundle_storage.max_receive_size())
+ // Consumes results if Ok, otherwise we keep the Err
+ .map(|receive_bundle_results| {
+ self.buffer_bundles(
+ receive_bundle_results,
+ unprocessed_bundle_storage,
+ bundle_stage_metrics,
+ // tracer_packet_stats,
+ bundle_stage_leader_metrics,
+ );
+ recv_and_buffer_measure.stop();
+ bundle_stage_metrics.increment_receive_and_buffer_bundles_elapsed_us(
+ recv_and_buffer_measure.as_us(),
+ );
+ })
+ });
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_receive_and_buffer_packets_us(recv_time_us);
+
+ result
+ }
+
+ fn get_receive_timeout(
+ unprocessed_transaction_storage: &UnprocessedTransactionStorage,
+ ) -> Duration {
+        // BundleStage will almost always not wait because the transaction storage will most likely not be empty
+ if !unprocessed_transaction_storage.is_empty() {
+ // If there are buffered packets, run the equivalent of try_recv to try reading more
+ // packets. This prevents starving BankingStage::consume_buffered_packets due to
+ // buffered_packet_batches containing transactions that exceed the cost model for
+ // the current bank.
+ Duration::from_millis(0)
+ } else {
+ // BundleStage should pick up a working_bank as fast as possible
+ Duration::from_millis(100)
+ }
+ }
+
+ fn buffer_bundles(
+ &self,
+ ReceiveBundleResults {
+ deserialized_bundles,
+ num_dropped_bundles: _,
+ num_dropped_packets: _,
+ }: ReceiveBundleResults,
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ bundle_stage_stats: &mut BundleStageLoopMetrics,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ ) {
+ let bundle_count = deserialized_bundles.len();
+ let packet_count: usize = deserialized_bundles.iter().map(|b| b.len()).sum();
+
+ bundle_stage_stats.increment_num_bundles_received(bundle_count as u64);
+ bundle_stage_stats.increment_num_packets_received(packet_count as u64);
+
+ debug!(
+ "@{:?} bundles: {} txs: {} id: {}",
+ timestamp(),
+ bundle_count,
+ packet_count,
+ self.id
+ );
+
+ Self::push_unprocessed(
+ unprocessed_transaction_storage,
+ deserialized_bundles,
+ bundle_stage_leader_metrics,
+ bundle_stage_stats,
+ );
+ }
+
+ fn push_unprocessed(
+ unprocessed_transaction_storage: &mut UnprocessedTransactionStorage,
+ deserialized_bundles: Vec,
+ bundle_stage_leader_metrics: &mut BundleStageLeaderMetrics,
+ bundle_stage_stats: &mut BundleStageLoopMetrics,
+ ) {
+ if !deserialized_bundles.is_empty() {
+ let insert_bundles_summary =
+ unprocessed_transaction_storage.insert_bundles(deserialized_bundles);
+
+ bundle_stage_stats.increment_newly_buffered_bundles_count(
+ insert_bundles_summary.num_bundles_inserted as u64,
+ );
+ bundle_stage_stats
+ .increment_num_bundles_dropped(insert_bundles_summary.num_bundles_dropped as u64);
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .increment_newly_buffered_packets_count(
+ insert_bundles_summary.num_packets_inserted as u64,
+ );
+
+ bundle_stage_leader_metrics
+ .leader_slot_metrics_tracker()
+ .accumulate_insert_packet_batches_summary(
+ &insert_bundles_summary.insert_packets_summary,
+ );
+ }
+ }
+}
+
+/// This tests functionality of BundleReceiver and the internals of BundleStorage because
+/// they're tightly intertwined
+#[cfg(test)]
+mod tests {
+ use {
+ super::*,
+ crossbeam_channel::unbounded,
+ rand::{thread_rng, RngCore},
+ solana_bundle::{
+ bundle_execution::LoadAndExecuteBundleError, BundleExecutionError, TipError,
+ },
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_poh::poh_recorder::PohRecorderError,
+ solana_runtime::{bank::Bank, genesis_utils::GenesisConfigInfo},
+ solana_sdk::{
+ bundle::{derive_bundle_id, SanitizedBundle},
+ hash::Hash,
+ packet::Packet,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ transaction::VersionedTransaction,
+ },
+ std::collections::{HashSet, VecDeque},
+ };
+
+ /// Makes `num_bundles` random bundles with `num_packets_per_bundle` packets per bundle.
+ fn make_random_bundles(
+ mint_keypair: &Keypair,
+ num_bundles: usize,
+ num_packets_per_bundle: usize,
+ hash: Hash,
+    ) -> Vec<PacketBundle> {
+ let mut rng = thread_rng();
+
+ (0..num_bundles)
+ .map(|_| {
+ let transfers: Vec<_> = (0..num_packets_per_bundle)
+ .map(|_| {
+ VersionedTransaction::from(transfer(
+ mint_keypair,
+ &mint_keypair.pubkey(),
+ rng.next_u64(),
+ hash,
+ ))
+ })
+ .collect();
+ let bundle_id = derive_bundle_id(&transfers);
+
+ PacketBundle {
+ batch: PacketBatch::new(
+ transfers
+ .iter()
+ .map(|tx| Packet::from_data(None, tx).unwrap())
+ .collect(),
+ ),
+ bundle_id,
+ }
+ })
+ .collect()
+ }
+
+ fn assert_bundles_same(
+ packet_bundles: &[PacketBundle],
+ bundles_to_process: &[(ImmutableDeserializedBundle, SanitizedBundle)],
+ ) {
+ assert_eq!(packet_bundles.len(), bundles_to_process.len());
+ packet_bundles
+ .iter()
+ .zip(bundles_to_process.iter())
+ .for_each(|(packet_bundle, (_, sanitized_bundle))| {
+ assert_eq!(packet_bundle.bundle_id, sanitized_bundle.bundle_id);
+ assert_eq!(
+ packet_bundle.batch.len(),
+ sanitized_bundle.transactions.len()
+ );
+ });
+ }
+
+ #[test]
+ fn test_receive_bundles() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(1_000),
+ VecDeque::with_capacity(1_000),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ let bundles = make_random_bundles(&mint_keypair, 10, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 10);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 20);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+ assert_eq!(bundle_storage.max_receive_size(), 990);
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ (0..bundles_to_process.len()).map(|_| Ok(())).collect()
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+ assert_eq!(bundle_storage.max_receive_size(), 1000);
+ }
+
+ #[test]
+ fn test_receive_more_bundles_than_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ let bundles = make_random_bundles(&mint_keypair, 15, 2, genesis_config.hash());
+
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // 15 bundles were sent, but the capacity is 10
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 10);
+ assert_eq!(bundle_storage.unprocessed_packets_len(), 20);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_packets_len(), 0);
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure the first 10 bundles are the ones to process
+ assert_bundles_same(&bundles[0..10], bundles_to_process);
+ (0..bundles_to_process.len()).map(|_| Ok(())).collect()
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_poh_record_error_rebuffered() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let poh_max_height_reached_index = 3;
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // make sure poh end of slot reached + the correct bundles are buffered for the next time.
+ // bundles at index 3 + 4 are rebuffered
+ assert!(bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+
+ let mut results = vec![Ok(()); bundles_to_process.len()];
+
+ (poh_max_height_reached_index..bundles_to_process.len()).for_each(|index| {
+ results[index] = Err(BundleExecutionError::PohRecordError(
+ PohRecorderError::MaxHeightReached,
+ ));
+ });
+ results
+ }
+ ));
+
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 2);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles[poh_max_height_reached_index..], bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_bank_processing_done_rebuffered() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bank_processing_done_index = 3;
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // bundles at index 3 + 4 are rebuffered
+ assert!(bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+
+ let mut results = vec![Ok(()); bundles_to_process.len()];
+
+ (bank_processing_done_index..bundles_to_process.len()).for_each(|index| {
+ results[index] = Err(BundleExecutionError::BankProcessingTimeLimitReached);
+ });
+ results
+ }
+ ));
+
+ // 0, 1, 2 processed; 3, 4 buffered
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 2);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles[bank_processing_done_index..], bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_bank_execution_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(Duration::from_secs(1)),
+ ));
+ bundles_to_process.len()
+ ]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_tip_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![
+ Err(BundleExecutionError::TipError(TipError::LockError));
+ bundles_to_process.len()
+ ]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_lock_error_dropped() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ vec![Err(BundleExecutionError::LockError); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_cost_model_exceeded_set_aside_and_requeued() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+ // send 5 bundles across the queue
+ let bundles = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+
+ // buffered bundles are moved to cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5);
+
+ // double check there's no bundles to process
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert!(bundles_to_process.is_empty());
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+
+ // create a new bank w/ new slot number, cost model buffered packets should move back onto queue
+ // in the same order they were originally
+ let bank = bank_forks.read().unwrap().working_bank();
+ let new_bank = Arc::new(Bank::new_from_parent(
+ bank.clone(),
+ bank.collector_id(),
+ bank.slot() + 1,
+ ));
+ assert!(!bundle_storage.process_bundles(
+ new_bank,
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure same order as original
+ assert_bundles_same(&bundles, bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+
+ #[test]
+ fn test_process_bundles_cost_model_exceeded_buffer_capacity() {
+ solana_logger::setup();
+
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank_forks =
+ BankForks::new_rw_arc(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let mut unprocessed_storage = UnprocessedTransactionStorage::new_bundle_storage(
+ VecDeque::with_capacity(10),
+ VecDeque::with_capacity(10),
+ );
+
+ let (sender, receiver) = unbounded();
+ let mut bundle_receiver = BundleReceiver::new(0, receiver, bank_forks.clone(), Some(5));
+
+        // send the first of three 5-bundle batches (15 bundles over the whole test)
+ let bundles0 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles0.clone()).unwrap();
+
+ let mut bundle_stage_stats = BundleStageLoopMetrics::default();
+ let mut bundle_stage_leader_metrics = BundleStageLeaderMetrics::new(0);
+
+ // receive and buffer bundles to the cost model reserve to test the capacity/dropped bundles there
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles0, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 5);
+
+ let bundles1 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles1.clone()).unwrap();
+ // should get 5 more bundles + cost model buffered length should be 10
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to the cost model side deque
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles1, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10);
+
+ let bundles2 = make_random_bundles(&mint_keypair, 5, 2, genesis_config.hash());
+ sender.send(bundles2.clone()).unwrap();
+
+ // this set will be dropped from the cost model buffered bundles
+ let result = bundle_receiver.receive_and_buffer_bundles(
+ &mut unprocessed_storage,
+ &mut bundle_stage_stats,
+ &mut bundle_stage_leader_metrics,
+ );
+ assert!(result.is_ok());
+
+ let bundle_storage = unprocessed_storage.bundle_storage().unwrap();
+ // buffered bundles are moved to the cost model side deque, but it's at capacity so it stays the same size
+ assert!(!bundle_storage.process_bundles(
+ bank_forks.read().unwrap().working_bank(),
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ assert_bundles_same(&bundles2, bundles_to_process);
+ vec![Err(BundleExecutionError::ExceedsCostModel); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 10);
+
+ // create a new bank then call process_bundles again, expect to see [bundles0, bundles1]
+ let bank = bank_forks.read().unwrap().working_bank();
+ let new_bank = Arc::new(Bank::new_from_parent(
+ bank.clone(),
+ bank.collector_id(),
+ bank.slot() + 1,
+ ));
+ assert!(!bundle_storage.process_bundles(
+ new_bank,
+ &mut bundle_stage_leader_metrics,
+ &HashSet::default(),
+ |bundles_to_process, _stats| {
+ // make sure same order as original
+ let expected_bundles: Vec<_> =
+ bundles0.iter().chain(bundles1.iter()).cloned().collect();
+ assert_bundles_same(&expected_bundles, bundles_to_process);
+ vec![Ok(()); bundles_to_process.len()]
+ }
+ ));
+ assert_eq!(bundle_storage.unprocessed_bundles_len(), 0);
+ assert_eq!(bundle_storage.cost_model_buffered_bundles_len(), 0);
+ }
+}
diff --git a/core/src/bundle_stage/bundle_reserved_space_manager.rs b/core/src/bundle_stage/bundle_reserved_space_manager.rs
new file mode 100644
index 00000000000000..a6b858d3b23ee7
--- /dev/null
+++ b/core/src/bundle_stage/bundle_reserved_space_manager.rs
@@ -0,0 +1,239 @@
+use {solana_runtime::bank::Bank, solana_sdk::clock::Slot, std::sync::Arc};
+
+/// Manager responsible for reserving `bundle_reserved_cost` during the first `reserved_ticks` of a bank
+/// and resetting the block cost limit to `block_cost_limit` after the reserved tick period is over
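+///
+/// # Example
+///
+/// A minimal usage sketch (illustrative only; the reserved cost and tick
+/// values below are assumptions, not defaults):
+///
+/// ```ignore
+/// let block_cost_limit = bank.read_cost_tracker().unwrap().block_cost_limit();
+/// let mut reserved_space = BundleReservedSpaceManager::new(block_cost_limit, 3_000_000, 40);
+/// // call on each new bank and periodically while processing bundles
+/// reserved_space.tick(&bank);
+/// ```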
+pub struct BundleReservedSpaceManager {
+ // the bank's cost limit
+ block_cost_limit: u64,
+ // bundles get this much reserved space for the first reserved_ticks
+ bundle_reserved_cost: u64,
+ // the reduced block_cost_limit is in effect for this many ticks; afterwards it reverts to the full limit
+ reserved_ticks: u64,
+ last_slot_updated: Slot,
+}
+
+impl BundleReservedSpaceManager {
+ pub fn new(block_cost_limit: u64, bundle_reserved_cost: u64, reserved_ticks: u64) -> Self {
+ Self {
+ block_cost_limit,
+ bundle_reserved_cost,
+ reserved_ticks,
+ last_slot_updated: u64::MAX,
+ }
+ }
+
+ /// Call this on creation of a new bank and periodically while processing bundles
+ /// to manage the block_cost_limits
+ pub fn tick(&mut self, bank: &Arc<Bank>) {
+ if self.last_slot_updated == bank.slot() && !self.is_in_reserved_tick_period(bank) {
+ // new slot logic already ran, need to revert the block cost limit to original if
+ // ticks are past the reserved tick mark
+ debug!(
+ "slot: {} ticks: {}, resetting block_cost_limit to {}",
+ bank.slot(),
+ bank.tick_height(),
+ self.block_cost_limit
+ );
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(self.block_cost_limit);
+ } else if self.last_slot_updated != bank.slot() && self.is_in_reserved_tick_period(bank) {
+ // new slot: if still within the first reserved_ticks of the slot, reserve space;
+ // otherwise the current block limit can be left as-is
+ let new_block_cost_limit = self.reduced_block_cost_limit();
+ debug!(
+ "slot: {} ticks: {}, reserving block_cost_limit with block_cost_limit of {}",
+ bank.slot(),
+ bank.tick_height(),
+ new_block_cost_limit
+ );
+ bank.write_cost_tracker()
+ .unwrap()
+ .set_block_cost_limit(new_block_cost_limit);
+ self.last_slot_updated = bank.slot();
+ }
+ }
+
+ /// return true if the bank is still in the period where block_cost_limit is reduced
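+ /// (e.g. with ticks_per_slot = 64 and reserved_ticks = 5, tick heights
+ /// 64..=68 of slot 1, i.e. the first five ticks of the slot, are in the window)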
+ pub fn is_in_reserved_tick_period(&self, bank: &Bank) -> bool {
+ bank.tick_height() % bank.ticks_per_slot() < self.reserved_ticks
+ }
+
+ /// return the block_cost_limit as determined by the tick height of the bank
+ pub fn expected_block_cost_limits(&self, bank: &Bank) -> u64 {
+ if self.is_in_reserved_tick_period(bank) {
+ self.reduced_block_cost_limit()
+ } else {
+ self.block_cost_limit()
+ }
+ }
+
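+ /// Cost limit left for non-bundle transactions while space is reserved;
+ /// e.g. (illustrative numbers) a block_cost_limit of 48_000_000 with a
+ /// bundle_reserved_cost of 3_000_000 yields 45_000_000 during the window.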
+ pub fn reduced_block_cost_limit(&self) -> u64 {
+ self.block_cost_limit
+ .saturating_sub(self.bundle_reserved_cost)
+ }
+
+ pub fn block_cost_limit(&self) -> u64 {
+ self.block_cost_limit
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::bundle_stage::bundle_reserved_space_manager::BundleReservedSpaceManager,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_runtime::bank::Bank,
+ solana_sdk::{hash::Hash, pubkey::Pubkey},
+ std::sync::Arc,
+ };
+
+ #[test]
+ fn test_reserve_block_cost_limits_during_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION
+ );
+ }
+
+ #[test]
+ fn test_dont_reserve_block_cost_limits_after_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ for _ in 0..5 {
+ bank.register_tick(&Hash::default());
+ }
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits
+ );
+ }
+
+ #[test]
+ fn test_dont_reset_block_cost_limits_during_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+
+ reserved_space.tick(&bank);
+ bank.register_tick(&Hash::default());
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION
+ );
+ }
+
+ #[test]
+ fn test_reset_block_cost_limits_after_reserved_ticks() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ let block_cost_limits = bank.read_cost_tracker().unwrap().block_cost_limit();
+
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ 5,
+ );
+
+ reserved_space.tick(&bank);
+
+ for _ in 0..5 {
+ bank.register_tick(&Hash::default());
+ }
+ reserved_space.tick(&bank);
+
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits
+ );
+ }
+
+ #[test]
+ fn test_block_limits_after_first_slot() {
+ const BUNDLE_BLOCK_COST_LIMITS_RESERVATION: u64 = 100;
+ const RESERVED_TICKS: u64 = 5;
+ let genesis_config_info = create_genesis_config(100);
+ let bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config));
+
+ for _ in 0..genesis_config_info.genesis_config.ticks_per_slot {
+ bank.register_tick(&Hash::default());
+ }
+ assert!(bank.is_complete());
+ bank.freeze();
+ assert_eq!(
+ bank.read_cost_tracker().unwrap().block_cost_limit(),
+ solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS,
+ );
+
+ let bank1 = Arc::new(Bank::new_from_parent(bank.clone(), &Pubkey::default(), 1));
+ assert_eq!(bank1.slot(), 1);
+ assert_eq!(bank1.tick_height(), 64);
+ assert_eq!(bank1.max_tick_height(), 128);
+
+ // reserve space
+ let block_cost_limits = bank1.read_cost_tracker().unwrap().block_cost_limit();
+ let mut reserved_space = BundleReservedSpaceManager::new(
+ block_cost_limits,
+ BUNDLE_BLOCK_COST_LIMITS_RESERVATION,
+ RESERVED_TICKS,
+ );
+ reserved_space.tick(&bank1);
+
+ // wait for reservation to be over
+ (0..RESERVED_TICKS).for_each(|_| {
+ bank1.register_tick(&Hash::default());
+ assert_eq!(
+ bank1.read_cost_tracker().unwrap().block_cost_limit(),
+ block_cost_limits - BUNDLE_BLOCK_COST_LIMITS_RESERVATION
+ );
+ });
+ reserved_space.tick(&bank1);
+
+ // after reservation, revert back to normal limit
+ assert_eq!(
+ bank1.read_cost_tracker().unwrap().block_cost_limit(),
+ solana_cost_model::block_cost_limits::MAX_BLOCK_UNITS,
+ );
+ }
+}
diff --git a/core/src/bundle_stage/bundle_stage_leader_metrics.rs b/core/src/bundle_stage/bundle_stage_leader_metrics.rs
new file mode 100644
index 00000000000000..52c1aa07145556
--- /dev/null
+++ b/core/src/bundle_stage/bundle_stage_leader_metrics.rs
@@ -0,0 +1,502 @@
+use {
+ crate::{
+ banking_stage::{leader_slot_metrics, leader_slot_metrics::LeaderSlotMetricsTracker},
+ immutable_deserialized_bundle::DeserializedBundleError,
+ },
+ solana_bundle::{bundle_execution::LoadAndExecuteBundleError, BundleExecutionError},
+ solana_poh::poh_recorder::BankStart,
+ solana_sdk::{bundle::SanitizedBundle, clock::Slot, saturating_add_assign},
+};
+
+pub struct BundleStageLeaderMetrics {
+ bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker,
+ leader_slot_metrics_tracker: LeaderSlotMetricsTracker,
+}
+
+pub(crate) enum MetricsTrackerAction {
+ Noop,
+ ReportAndResetTracker,
+ NewTracker(Option<BundleStageStats>),
+ ReportAndNewTracker(Option<BundleStageStats>),
+}
+
+impl BundleStageLeaderMetrics {
+ pub fn new(id: u32) -> Self {
+ Self {
+ bundle_stage_metrics_tracker: BundleStageStatsMetricsTracker::new(id),
+ leader_slot_metrics_tracker: LeaderSlotMetricsTracker::new(id),
+ }
+ }
+
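+ /// Checks both trackers against the current bank start. A minimal sketch of
+ /// the intended call pattern (illustrative; `metrics` is a
+ /// `BundleStageLeaderMetrics` and `bank_start` an `Option<BankStart>` from
+ /// the poh recorder):
+ ///
+ /// ```ignore
+ /// let (banking_action, bundle_action) =
+ /// metrics.check_leader_slot_boundary(bank_start.as_ref());
+ /// metrics.apply_action(banking_action, bundle_action);
+ /// ```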
+ pub(crate) fn check_leader_slot_boundary(
+ &mut self,
+ bank_start: Option<&BankStart>,
+ ) -> (
+ leader_slot_metrics::MetricsTrackerAction,
+ MetricsTrackerAction,
+ ) {
+ let banking_stage_metrics_action = self
+ .leader_slot_metrics_tracker
+ .check_leader_slot_boundary(bank_start);
+ let bundle_stage_metrics_action = self
+ .bundle_stage_metrics_tracker
+ .check_leader_slot_boundary(bank_start);
+ (banking_stage_metrics_action, bundle_stage_metrics_action)
+ }
+
+ pub(crate) fn apply_action(
+ &mut self,
+ banking_stage_metrics_action: leader_slot_metrics::MetricsTrackerAction,
+ bundle_stage_metrics_action: MetricsTrackerAction,
+ ) -> Option<Slot> {
+ self.leader_slot_metrics_tracker
+ .apply_action(banking_stage_metrics_action);
+ self.bundle_stage_metrics_tracker
+ .apply_action(bundle_stage_metrics_action)
+ }
+
+ pub fn leader_slot_metrics_tracker(&mut self) -> &mut LeaderSlotMetricsTracker {
+ &mut self.leader_slot_metrics_tracker
+ }
+
+ pub fn bundle_stage_metrics_tracker(&mut self) -> &mut BundleStageStatsMetricsTracker {
+ &mut self.bundle_stage_metrics_tracker
+ }
+}
+
+pub struct BundleStageStatsMetricsTracker {
+ bundle_stage_metrics: Option<BundleStageStats>,
+ id: u32,
+}
+
+impl BundleStageStatsMetricsTracker {
+ pub fn new(id: u32) -> Self {
+ Self {
+ bundle_stage_metrics: None,
+ id,
+ }
+ }
+
+ /// Similar to LeaderSlotMetricsTracker::check_leader_slot_boundary
+ pub(crate) fn check_leader_slot_boundary(
+ &mut self,
+ bank_start: Option<&BankStart>,
+ ) -> MetricsTrackerAction {
+ match (self.bundle_stage_metrics.as_mut(), bank_start) {
+ (None, None) => MetricsTrackerAction::Noop,
+ (Some(_), None) => MetricsTrackerAction::ReportAndResetTracker,
+ // Our leader slot has begun, time to create a new slot tracker
+ (None, Some(bank_start)) => MetricsTrackerAction::NewTracker(Some(
+ BundleStageStats::new(self.id, bank_start.working_bank.slot()),
+ )),
+ (Some(bundle_stage_metrics), Some(bank_start)) => {
+ if bundle_stage_metrics.slot != bank_start.working_bank.slot() {
+ // Last slot has ended, new slot has begun
+ MetricsTrackerAction::ReportAndNewTracker(Some(BundleStageStats::new(
+ self.id,
+ bank_start.working_bank.slot(),
+ )))
+ } else {
+ MetricsTrackerAction::Noop
+ }
+ }
+ }
+ }
+
+ /// Similar to LeaderSlotMetricsTracker::apply_action
+ pub(crate) fn apply_action(&mut self, action: MetricsTrackerAction) -> Option<Slot> {
+ match action {
+ MetricsTrackerAction::Noop => None,
+ MetricsTrackerAction::ReportAndResetTracker => {
+ let mut reported_slot = None;
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ bundle_stage_metrics.report();
+ reported_slot = bundle_stage_metrics.reported_slot();
+ }
+ self.bundle_stage_metrics = None;
+ reported_slot
+ }
+ MetricsTrackerAction::NewTracker(new_bundle_stage_metrics) => {
+ self.bundle_stage_metrics = new_bundle_stage_metrics;
+ self.bundle_stage_metrics.as_ref().unwrap().reported_slot()
+ }
+ MetricsTrackerAction::ReportAndNewTracker(new_bundle_stage_metrics) => {
+ let mut reported_slot = None;
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ bundle_stage_metrics.report();
+ reported_slot = bundle_stage_metrics.reported_slot();
+ }
+ self.bundle_stage_metrics = new_bundle_stage_metrics;
+ reported_slot
+ }
+ }
+ }
+
+ pub(crate) fn increment_sanitize_transaction_result(
+ &mut self,
+ result: &Result<SanitizedBundle, DeserializedBundleError>,
+ ) {
+ if let Some(bundle_stage_metrics) = self.bundle_stage_metrics.as_mut() {
+ match result {
+ Ok(_) => {
+ saturating_add_assign!(bundle_stage_metrics.sanitize_transaction_ok, 1);
+ }
+ Err(e) => match e {
+ DeserializedBundleError::VoteOnlyMode => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_vote_only_mode,
+ 1
+ );
+ }
+ DeserializedBundleError::BlacklistedAccount => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_blacklisted_account,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedToSerializeTransaction => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_to_serialize,
+ 1
+ );
+ }
+ DeserializedBundleError::DuplicateTransaction => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_duplicate_transaction,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedCheckTransactions => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_check,
+ 1
+ );
+ }
+ DeserializedBundleError::FailedToSerializePacket => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_to_serialize,
+ 1
+ );
+ }
+ DeserializedBundleError::EmptyBatch => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_empty_batch,
+ 1
+ );
+ }
+ DeserializedBundleError::TooManyPackets => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_too_many_packets,
+ 1
+ );
+ }
+ DeserializedBundleError::MarkedDiscard => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_marked_discard,
+ 1
+ );
+ }
+ DeserializedBundleError::SignatureVerificationFailure => {
+ saturating_add_assign!(
+ bundle_stage_metrics.sanitize_transaction_failed_sig_verify_failed,
+ 1
+ );
+ }
+ },
+ }
+ }
+ }
+
+ pub fn increment_bundle_execution_result(&mut self, result: &Result<(), BundleExecutionError>) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ match result {
+ Ok(_) => {
+ saturating_add_assign!(bundle_stage_metrics.execution_results_ok, 1);
+ }
+ Err(BundleExecutionError::PohRecordError(_))
+ | Err(BundleExecutionError::BankProcessingTimeLimitReached) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_poh_max_height,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::ProcessingTimeExceeded(_),
+ )) => {
+ saturating_add_assign!(bundle_stage_metrics.num_execution_timeouts, 1);
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::TransactionError { .. },
+ )) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_transaction_failures,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::LockError { .. },
+ ))
+ | Err(BundleExecutionError::LockError) => {
+ saturating_add_assign!(bundle_stage_metrics.num_lock_errors, 1);
+ }
+ Err(BundleExecutionError::ExceedsCostModel) => {
+ saturating_add_assign!(
+ bundle_stage_metrics.execution_results_exceeds_cost_model,
+ 1
+ );
+ }
+ Err(BundleExecutionError::TipError(_)) => {
+ saturating_add_assign!(bundle_stage_metrics.execution_results_tip_errors, 1);
+ }
+ Err(BundleExecutionError::TransactionFailure(
+ LoadAndExecuteBundleError::InvalidPreOrPostAccounts,
+ )) => {
+ saturating_add_assign!(bundle_stage_metrics.bad_argument, 1);
+ }
+ }
+ }
+ }
+
+ pub(crate) fn increment_sanitize_bundle_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.sanitize_bundle_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_locked_bundle_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.locked_bundle_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_num_init_tip_account_errors(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_errors, count);
+ }
+ }
+
+ pub(crate) fn increment_num_init_tip_account_ok(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_init_tip_account_ok, count);
+ }
+ }
+
+ pub(crate) fn increment_num_change_tip_receiver_errors(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_errors, count);
+ }
+ }
+
+ pub(crate) fn increment_num_change_tip_receiver_ok(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_change_tip_receiver_ok, count);
+ }
+ }
+
+ pub(crate) fn increment_change_tip_receiver_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.change_tip_receiver_elapsed_us, count);
+ }
+ }
+
+ pub(crate) fn increment_num_execution_retries(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(bundle_stage_metrics.num_execution_retries, count);
+ }
+ }
+
+ pub(crate) fn increment_execute_locked_bundles_elapsed_us(&mut self, count: u64) {
+ if let Some(bundle_stage_metrics) = &mut self.bundle_stage_metrics {
+ saturating_add_assign!(
+ bundle_stage_metrics.execute_locked_bundles_elapsed_us,
+ count
+ );
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct BundleStageStats {
+ id: u32,
+ slot: u64,
+ is_reported: bool,
+
+ sanitize_transaction_ok: u64,
+ sanitize_transaction_vote_only_mode: u64,
+ sanitize_transaction_blacklisted_account: u64,
+ sanitize_transaction_failed_to_serialize: u64,
+ sanitize_transaction_duplicate_transaction: u64,
+ sanitize_transaction_failed_check: u64,
+ sanitize_bundle_elapsed_us: u64,
+ sanitize_transaction_failed_empty_batch: u64,
+ sanitize_transaction_failed_too_many_packets: u64,
+ sanitize_transaction_failed_marked_discard: u64,
+ sanitize_transaction_failed_sig_verify_failed: u64,
+
+ locked_bundle_elapsed_us: u64,
+
+ num_lock_errors: u64,
+
+ num_init_tip_account_errors: u64,
+ num_init_tip_account_ok: u64,
+
+ num_change_tip_receiver_errors: u64,
+ num_change_tip_receiver_ok: u64,
+ change_tip_receiver_elapsed_us: u64,
+
+ num_execution_timeouts: u64,
+ num_execution_retries: u64,
+
+ execute_locked_bundles_elapsed_us: u64,
+
+ execution_results_ok: u64,
+ execution_results_poh_max_height: u64,
+ execution_results_transaction_failures: u64,
+ execution_results_exceeds_cost_model: u64,
+ execution_results_tip_errors: u64,
+ execution_results_max_retries: u64,
+
+ bad_argument: u64,
+}
+
+impl BundleStageStats {
+ pub fn new(id: u32, slot: Slot) -> BundleStageStats {
+ BundleStageStats {
+ id,
+ slot,
+ is_reported: false,
+ ..BundleStageStats::default()
+ }
+ }
+
+ /// Returns `Some(self.slot)` if the metrics have been reported, otherwise returns None
+ fn reported_slot(&self) -> Option<Slot> {
+ if self.is_reported {
+ Some(self.slot)
+ } else {
+ None
+ }
+ }
+
+ pub fn report(&mut self) {
+ self.is_reported = true;
+
+ datapoint_info!(
+ "bundle_stage-stats",
+ ("id", self.id, i64),
+ ("slot", self.slot, i64),
+ ("num_sanitized_ok", self.sanitize_transaction_ok, i64),
+ (
+ "sanitize_transaction_vote_only_mode",
+ self.sanitize_transaction_vote_only_mode,
+ i64
+ ),
+ (
+ "sanitize_transaction_blacklisted_account",
+ self.sanitize_transaction_blacklisted_account,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_to_serialize",
+ self.sanitize_transaction_failed_to_serialize,
+ i64
+ ),
+ (
+ "sanitize_transaction_duplicate_transaction",
+ self.sanitize_transaction_duplicate_transaction,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_check",
+ self.sanitize_transaction_failed_check,
+ i64
+ ),
+ (
+ "sanitize_bundle_elapsed_us",
+ self.sanitize_bundle_elapsed_us,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_empty_batch",
+ self.sanitize_transaction_failed_empty_batch,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_too_many_packets",
+ self.sanitize_transaction_failed_too_many_packets,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_marked_discard",
+ self.sanitize_transaction_failed_marked_discard,
+ i64
+ ),
+ (
+ "sanitize_transaction_failed_sig_verify_failed",
+ self.sanitize_transaction_failed_sig_verify_failed,
+ i64
+ ),
+ (
+ "locked_bundle_elapsed_us",
+ self.locked_bundle_elapsed_us,
+ i64
+ ),
+ ("num_lock_errors", self.num_lock_errors, i64),
+ (
+ "num_init_tip_account_errors",
+ self.num_init_tip_account_errors,
+ i64
+ ),
+ ("num_init_tip_account_ok", self.num_init_tip_account_ok, i64),
+ (
+ "num_change_tip_receiver_errors",
+ self.num_change_tip_receiver_errors,
+ i64
+ ),
+ (
+ "num_change_tip_receiver_ok",
+ self.num_change_tip_receiver_ok,
+ i64
+ ),
+ (
+ "change_tip_receiver_elapsed_us",
+ self.change_tip_receiver_elapsed_us,
+ i64
+ ),
+ ("num_execution_timeouts", self.num_execution_timeouts, i64),
+ ("num_execution_retries", self.num_execution_retries, i64),
+ (
+ "execute_locked_bundles_elapsed_us",
+ self.execute_locked_bundles_elapsed_us,
+ i64
+ ),
+ ("execution_results_ok", self.execution_results_ok, i64),
+ (
+ "execution_results_poh_max_height",
+ self.execution_results_poh_max_height,
+ i64
+ ),
+ (
+ "execution_results_transaction_failures",
+ self.execution_results_transaction_failures,
+ i64
+ ),
+ (
+ "execution_results_exceeds_cost_model",
+ self.execution_results_exceeds_cost_model,
+ i64
+ ),
+ (
+ "execution_results_tip_errors",
+ self.execution_results_tip_errors,
+ i64
+ ),
+ (
+ "execution_results_max_retries",
+ self.execution_results_max_retries,
+ i64
+ ),
+ ("bad_argument", self.bad_argument, i64)
+ );
+ }
+}
diff --git a/core/src/bundle_stage/committer.rs b/core/src/bundle_stage/committer.rs
new file mode 100644
index 00000000000000..5bdf0c0b5a26a6
--- /dev/null
+++ b/core/src/bundle_stage/committer.rs
@@ -0,0 +1,218 @@
+use {
+ crate::banking_stage::{
+ committer::CommitTransactionDetails,
+ leader_slot_timing_metrics::LeaderExecuteAndCommitTimings,
+ },
+ solana_accounts_db::transaction_results::TransactionResults,
+ solana_bundle::bundle_execution::LoadAndExecuteBundleOutput,
+ solana_ledger::blockstore_processor::TransactionStatusSender,
+ solana_measure::measure_us,
+ solana_runtime::{
+ bank::{Bank, CommitTransactionCounts, TransactionBalances, TransactionBalancesSet},
+ bank_utils,
+ prioritization_fee_cache::PrioritizationFeeCache,
+ },
+ solana_sdk::{hash::Hash, saturating_add_assign, transaction::SanitizedTransaction},
+ solana_transaction_status::{
+ token_balances::{TransactionTokenBalances, TransactionTokenBalancesSet},
+ PreBalanceInfo,
+ },
+ solana_vote::vote_sender_types::ReplayVoteSender,
+ std::sync::Arc,
+};
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct CommitBundleDetails {
+ pub commit_transaction_details: Vec<Vec<CommitTransactionDetails>>,
+}
+
+pub struct Committer {
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ prioritization_fee_cache: Arc<PrioritizationFeeCache>,
+}
+
+impl Committer {
+ pub fn new(
+ transaction_status_sender: Option<TransactionStatusSender>,
+ replay_vote_sender: ReplayVoteSender,
+ prioritization_fee_cache: Arc<PrioritizationFeeCache>,
+ ) -> Self {
+ Self {
+ transaction_status_sender,
+ replay_vote_sender,
+ prioritization_fee_cache,
+ }
+ }
+
+ pub(crate) fn transaction_status_sender_enabled(&self) -> bool {
+ self.transaction_status_sender.is_some()
+ }
+
+ /// Very similar to Committer::commit_transactions, but works with bundles.
+ /// The main difference is that there are multiple non-parallelizable transaction vectors to commit,
+ /// and post-balances are collected after execution instead of from the bank in Self::collect_balances_and_send_status_batch.
+ #[allow(clippy::too_many_arguments)]
+ pub(crate) fn commit_bundle<'a>(
+ &self,
+ bundle_execution_output: &'a mut LoadAndExecuteBundleOutput<'a>,
+ last_blockhash: Hash,
+ lamports_per_signature: u64,
+ mut starting_transaction_index: Option<usize>,
+ bank: &Arc<Bank>,
+ execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings,
+ ) -> (u64, CommitBundleDetails) {
+ let transaction_output = bundle_execution_output.bundle_transaction_results_mut();
+
+ let (commit_transaction_details, commit_times): (Vec<_>, Vec<_>) = transaction_output
+ .iter_mut()
+ .map(|bundle_results| {
+ let committed_transactions_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_transactions_count
+ as u64;
+
+ let committed_non_vote_transactions_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_non_vote_transactions_count
+ as u64;
+
+ let committed_with_failure_result_count = bundle_results
+ .load_and_execute_transactions_output()
+ .executed_transactions_count
+ .saturating_sub(
+ bundle_results
+ .load_and_execute_transactions_output()
+ .executed_with_successful_result_count,
+ ) as u64;
+
+ let signature_count = bundle_results
+ .load_and_execute_transactions_output()
+ .signature_count;
+
+ let sanitized_transactions = bundle_results.transactions().to_vec();
+ let execution_results = bundle_results.execution_results().to_vec();
+
+ let loaded_transactions = bundle_results.loaded_transactions_mut();
+ debug!("loaded_transactions: {:?}", loaded_transactions);
+
+ let (tx_results, commit_time_us) = measure_us!(bank.commit_transactions(
+ &sanitized_transactions,
+ loaded_transactions,
+ execution_results,
+ last_blockhash,
+ lamports_per_signature,
+ CommitTransactionCounts {
+ committed_transactions_count,
+ committed_non_vote_transactions_count,
+ committed_with_failure_result_count,
+ signature_count,
+ },
+ &mut execute_and_commit_timings.execute_timings,
+ ));
+
+ let commit_transaction_statuses: Vec<_> = tx_results
+ .execution_results
+ .iter()
+ .map(|execution_result| match execution_result.details() {
+ Some(details) => CommitTransactionDetails::Committed {
+ compute_units: details.executed_units,
+ },
+ None => CommitTransactionDetails::NotCommitted,
+ })
+ .collect();
+
+ let ((), find_and_send_votes_us) = measure_us!({
+ bank_utils::find_and_send_votes(
+ &sanitized_transactions,
+ &tx_results,
+ Some(&self.replay_vote_sender),
+ );
+
+ let post_balance_info = bundle_results.post_balance_info().clone();
+ let pre_balance_info = bundle_results.pre_balance_info();
+
+ let num_committed = tx_results
+ .execution_results
+ .iter()
+ .filter(|r| r.was_executed())
+ .count();
+
+ self.collect_balances_and_send_status_batch(
+ tx_results,
+ bank,
+ sanitized_transactions,
+ pre_balance_info,
+ post_balance_info,
+ starting_transaction_index,
+ );
+
+ // NOTE: we're doing batched records, so we need to increment the poh starting_transaction_index
+ // by the number committed so the next batch has the correct starting_transaction_index
+ starting_transaction_index =
+ starting_transaction_index.map(|starting_transaction_index| {
+ starting_transaction_index.saturating_add(num_committed)
+ });
+
+ self.prioritization_fee_cache
+ .update(bank, bundle_results.executed_transactions().into_iter());
+ });
+ saturating_add_assign!(
+ execute_and_commit_timings.find_and_send_votes_us,
+ find_and_send_votes_us
+ );
+
+ (commit_transaction_statuses, commit_time_us)
+ })
+ .unzip();
+
+ (
+ commit_times.iter().sum(),
+ CommitBundleDetails {
+ commit_transaction_details,
+ },
+ )
+ }
+
+ fn collect_balances_and_send_status_batch(
+ &self,
+ tx_results: TransactionResults,
+ bank: &Arc<Bank>,
+ sanitized_transactions: Vec<SanitizedTransaction>,
+ pre_balance_info: &mut PreBalanceInfo,
+ (post_balances, post_token_balances): (TransactionBalances, TransactionTokenBalances),
+ starting_transaction_index: Option<usize>,
+ ) {
+ if let Some(transaction_status_sender) = &self.transaction_status_sender {
+ let mut transaction_index = starting_transaction_index.unwrap_or_default();
+ let batch_transaction_indexes: Vec<_> = tx_results
+ .execution_results
+ .iter()
+ .map(|result| {
+ if result.was_executed() {
+ let this_transaction_index = transaction_index;
+ saturating_add_assign!(transaction_index, 1);
+ this_transaction_index
+ } else {
+ 0
+ }
+ })
+ .collect();
+ transaction_status_sender.send_transaction_status_batch(
+ bank.clone(),
+ sanitized_transactions,
+ tx_results.execution_results,
+ TransactionBalancesSet::new(
+ std::mem::take(&mut pre_balance_info.native),
+ post_balances,
+ ),
+ TransactionTokenBalancesSet::new(
+ std::mem::take(&mut pre_balance_info.token),
+ post_token_balances,
+ ),
+ tx_results.rent_debits,
+ batch_transaction_indexes,
+ );
+ }
+ }
+}
diff --git a/core/src/bundle_stage/result.rs b/core/src/bundle_stage/result.rs
new file mode 100644
index 00000000000000..3370251791f650
--- /dev/null
+++ b/core/src/bundle_stage/result.rs
@@ -0,0 +1,41 @@
+use {
+ crate::{
+ bundle_stage::bundle_account_locker::BundleAccountLockerError, tip_manager::TipPaymentError,
+ },
+ anchor_lang::error::Error,
+ solana_bundle::bundle_execution::LoadAndExecuteBundleError,
+ solana_poh::poh_recorder::PohRecorderError,
+ thiserror::Error,
+};
+
+pub type BundleExecutionResult<T> = Result<T, BundleExecutionError>;
+
+#[derive(Error, Debug, Clone)]
+pub enum BundleExecutionError {
+ #[error("PoH record error: {0}")]
+ PohRecordError(#[from] PohRecorderError),
+
+ #[error("Bank is done processing")]
+ BankProcessingDone,
+
+ #[error("Execution error: {0}")]
+ ExecutionError(#[from] LoadAndExecuteBundleError),
+
+ #[error("The bundle exceeds the cost model")]
+ ExceedsCostModel,
+
+ #[error("Tip error {0}")]
+ TipError(#[from] TipPaymentError),
+
+ #[error("Error locking bundle")]
+ LockError(#[from] BundleAccountLockerError),
+}
+
+impl From<Error> for TipPaymentError {
+ fn from(anchor_err: Error) -> Self {
+ match anchor_err {
+ Error::AnchorError(e) => Self::AnchorError(e.error_msg),
+ Error::ProgramError(e) => Self::AnchorError(e.to_string()),
+ }
+ }
+}
diff --git a/core/src/consensus_cache_updater.rs b/core/src/consensus_cache_updater.rs
new file mode 100644
index 00000000000000..e1dc137ba0ed30
--- /dev/null
+++ b/core/src/consensus_cache_updater.rs
@@ -0,0 +1,52 @@
+use {
+ solana_runtime::bank::Bank,
+ solana_sdk::{clock::Epoch, pubkey::Pubkey},
+ std::collections::HashSet,
+};
+
+#[derive(Default)]
+pub(crate) struct ConsensusCacheUpdater {
+ last_epoch_updated: Epoch,
+ consensus_accounts_cache: HashSet<Pubkey>,
+}
+
+impl ConsensusCacheUpdater {
+ pub(crate) fn consensus_accounts_cache(&self) -> &HashSet<Pubkey> {
+ &self.consensus_accounts_cache
+ }
+
+ /// Builds a HashSet of all consensus-related accounts for the Bank's epoch
+ fn get_consensus_accounts(bank: &Bank) -> HashSet<Pubkey> {
+ let mut consensus_accounts: HashSet<Pubkey> = HashSet::new();
+ if let Some(epoch_stakes) = bank.epoch_stakes(bank.epoch()) {
+ // votes use the following accounts:
+ // - vote_account pubkey: writeable
+ // - authorized_voter_pubkey: read-only
+ // - node_keypair pubkey: payer (writeable)
+ let node_id_vote_accounts = epoch_stakes.node_id_to_vote_accounts();
+
+ let vote_accounts = node_id_vote_accounts
+ .values()
+ .flat_map(|v| v.vote_accounts.clone());
+
+ // vote_account
+ consensus_accounts.extend(vote_accounts);
+ // authorized_voter_pubkey
+ consensus_accounts.extend(epoch_stakes.epoch_authorized_voters().keys());
+ // node_keypair
+ consensus_accounts.extend(epoch_stakes.node_id_to_vote_accounts().keys());
+ }
+ consensus_accounts
+ }
+
+ /// Updates consensus-related accounts on epoch boundaries
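+ /// Returns true when the cache was rebuilt; a typical caller refreshes its
+ /// account blacklist on that signal (illustrative sketch):
+ ///
+ /// ```ignore
+ /// if consensus_cache_updater.maybe_update(&bank) {
+ /// blacklisted_accounts = consensus_cache_updater.consensus_accounts_cache().clone();
+ /// }
+ /// ```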
+ pub(crate) fn maybe_update(&mut self, bank: &Bank) -> bool {
+ if bank.epoch() > self.last_epoch_updated {
+ self.consensus_accounts_cache = Self::get_consensus_accounts(bank);
+ self.last_epoch_updated = bank.epoch();
+ true
+ } else {
+ false
+ }
+ }
+}
diff --git a/core/src/immutable_deserialized_bundle.rs b/core/src/immutable_deserialized_bundle.rs
new file mode 100644
index 00000000000000..e4f9f8abb37f87
--- /dev/null
+++ b/core/src/immutable_deserialized_bundle.rs
@@ -0,0 +1,485 @@
+use {
+ crate::{
+ banking_stage::immutable_deserialized_packet::ImmutableDeserializedPacket,
+ packet_bundle::PacketBundle,
+ },
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_perf::sigverify::verify_packet,
+ solana_runtime::bank::Bank,
+ solana_sdk::{
+ bundle::SanitizedBundle, clock::MAX_PROCESSING_AGE, pubkey::Pubkey, signature::Signature,
+ transaction::SanitizedTransaction,
+ },
+ std::{
+ collections::{hash_map::RandomState, HashSet},
+ iter::repeat,
+ },
+ thiserror::Error,
+};
+
+#[derive(Debug, Error, Eq, PartialEq)]
+pub enum DeserializedBundleError {
+ #[error("FailedToSerializePacket")]
+ FailedToSerializePacket,
+
+ #[error("EmptyBatch")]
+ EmptyBatch,
+
+ #[error("TooManyPackets")]
+ TooManyPackets,
+
+ #[error("MarkedDiscard")]
+ MarkedDiscard,
+
+ #[error("SignatureVerificationFailure")]
+ SignatureVerificationFailure,
+
+ #[error("Bank is in vote-only mode")]
+ VoteOnlyMode,
+
+ #[error("Bundle mentions blacklisted account")]
+ BlacklistedAccount,
+
+ #[error("Bundle contains a transaction that failed to serialize")]
+ FailedToSerializeTransaction,
+
+ #[error("Bundle contains a duplicate transaction")]
+ DuplicateTransaction,
+
+ #[error("Bundle failed check_transactions")]
+ FailedCheckTransactions,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ImmutableDeserializedBundle {
+ bundle_id: String,
+ packets: Vec<ImmutableDeserializedPacket>,
+}
+
+impl ImmutableDeserializedBundle {
+ pub fn new(
+ bundle: &mut PacketBundle,
+ max_len: Option<usize>,
+ ) -> Result<Self, DeserializedBundleError> {
+ // Checks: batch is non-empty, within the max length, no packets marked for discard,
+ // all signatures verify, and every packet deserializes to an ImmutableDeserializedPacket
+ if bundle.batch.is_empty() {
+ return Err(DeserializedBundleError::EmptyBatch);
+ }
+ if max_len
+ .map(|max_len| bundle.batch.len() > max_len)
+ .unwrap_or(false)
+ {
+ return Err(DeserializedBundleError::TooManyPackets);
+ }
+ if bundle.batch.iter().any(|p| p.meta().discard()) {
+ return Err(DeserializedBundleError::MarkedDiscard);
+ }
+ if bundle.batch.iter_mut().any(|p| !verify_packet(p, false)) {
+ return Err(DeserializedBundleError::SignatureVerificationFailure);
+ }
+
+ let immutable_packets: Vec<_> = bundle
+ .batch
+ .iter()
+ .filter_map(|p| ImmutableDeserializedPacket::new(p.clone()).ok())
+ .collect();
+
+ if bundle.batch.len() != immutable_packets.len() {
+ return Err(DeserializedBundleError::FailedToSerializePacket);
+ }
+
+ Ok(Self {
+ bundle_id: bundle.bundle_id.clone(),
+ packets: immutable_packets,
+ })
+ }
+
+ #[allow(clippy::len_without_is_empty)]
+ pub fn len(&self) -> usize {
+ self.packets.len()
+ }
+
+ pub fn bundle_id(&self) -> &str {
+ &self.bundle_id
+ }
+
+ /// A bundle has the following requirements:
+ /// - all transactions must be sanitizable
+ /// - no duplicate signatures
+ /// - must not contain a blacklisted account
+ /// - can't already be processed or contain a bad blockhash
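+ ///
+ /// A minimal happy-path sketch (see the tests below for complete setups):
+ ///
+ /// ```ignore
+ /// let sanitized_bundle = bundle.build_sanitized_bundle(
+ /// &bank,
+ /// &HashSet::default(),
+ /// &mut TransactionErrorMetrics::default(),
+ /// )?;
+ /// ```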
+ pub fn build_sanitized_bundle(
+ &self,
+ bank: &Bank,
+ blacklisted_accounts: &HashSet,
+ transaction_error_metrics: &mut TransactionErrorMetrics,
+ ) -> Result<SanitizedBundle, DeserializedBundleError> {
+ if bank.vote_only_bank() {
+ return Err(DeserializedBundleError::VoteOnlyMode);
+ }
+
+ let transactions: Vec<SanitizedTransaction> = self
+ .packets
+ .iter()
+ .filter_map(|p| {
+ p.build_sanitized_transaction(&bank.feature_set, bank.vote_only_bank(), bank)
+ })
+ .collect();
+
+ if self.packets.len() != transactions.len() {
+ return Err(DeserializedBundleError::FailedToSerializeTransaction);
+ }
+
+ let unique_signatures: HashSet<&Signature, RandomState> =
+ HashSet::from_iter(transactions.iter().map(|tx| tx.signature()));
+ if unique_signatures.len() != transactions.len() {
+ return Err(DeserializedBundleError::DuplicateTransaction);
+ }
+
+ let contains_blacklisted_account = transactions.iter().any(|tx| {
+ tx.message()
+ .account_keys()
+ .iter()
+ .any(|acc| blacklisted_accounts.contains(acc))
+ });
+
+ if contains_blacklisted_account {
+ return Err(DeserializedBundleError::BlacklistedAccount);
+ }
+
+ // assume all locks succeed in order to check for already-processed transactions or an expired/invalid blockhash
+ let lock_results: Vec<_> = repeat(Ok(())).take(transactions.len()).collect();
+ let check_results = bank.check_transactions(
+ &transactions,
+ &lock_results,
+ MAX_PROCESSING_AGE,
+ transaction_error_metrics,
+ );
+
+ if check_results.iter().any(|r| r.0.is_err()) {
+ return Err(DeserializedBundleError::FailedCheckTransactions);
+ }
+
+ Ok(SanitizedBundle {
+ transactions,
+ bundle_id: self.bundle_id.clone(),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use {
+ crate::{
+ immutable_deserialized_bundle::{DeserializedBundleError, ImmutableDeserializedBundle},
+ packet_bundle::PacketBundle,
+ },
+ solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
+ solana_client::rpc_client::SerializableTransaction,
+ solana_ledger::genesis_utils::create_genesis_config,
+ solana_perf::packet::PacketBatch,
+ solana_runtime::{
+ bank::{Bank, NewBankOptions},
+ genesis_utils::GenesisConfigInfo,
+ },
+ solana_sdk::{
+ hash::Hash,
+ packet::Packet,
+ pubkey::Pubkey,
+ signature::{Keypair, Signer},
+ system_transaction::transfer,
+ },
+ std::{collections::HashSet, sync::Arc},
+ };
+
+ /// Happy case
+ #[test]
+ fn test_simple_get_sanitized_bundle() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let tx1 = transfer(&mint_keypair, &kp.pubkey(), 501, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![
+ Packet::from_data(None, &tx0).unwrap(),
+ Packet::from_data(None, &tx1).unwrap(),
+ ]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ let sanitized_bundle = bundle
+ .build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors)
+ .unwrap();
+ assert_eq!(sanitized_bundle.transactions.len(), 2);
+ assert_eq!(
+ sanitized_bundle.transactions[0].signature(),
+ tx0.get_signature()
+ );
+ assert_eq!(
+ sanitized_bundle.transactions[1].signature(),
+ tx1.get_signature()
+ );
+ }
+
+ #[test]
+ fn test_empty_batch_fails_to_init() {
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![]),
+ bundle_id: String::default(),
+ },
+ None,
+ ),
+ Err(DeserializedBundleError::EmptyBatch)
+ );
+ }
+
+ #[test]
+ fn test_too_many_packets_fails_to_init() {
+ let kp = Keypair::new();
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(
+ (0..10)
+ .map(|i| {
+ Packet::from_data(
+ None,
+ transfer(&kp, &kp.pubkey(), i, Hash::default()),
+ )
+ .unwrap()
+ })
+ .collect()
+ ),
+ bundle_id: String::default(),
+ },
+ Some(5),
+ ),
+ Err(DeserializedBundleError::TooManyPackets)
+ );
+ }
+
+ #[test]
+ fn test_packets_marked_discard_fails_to_init() {
+ let kp = Keypair::new();
+
+ let mut packet =
+ Packet::from_data(None, transfer(&kp, &kp.pubkey(), 100, Hash::default())).unwrap();
+ packet.meta_mut().set_discard(true);
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![packet]),
+ bundle_id: String::default(),
+ },
+ Some(5),
+ ),
+ Err(DeserializedBundleError::MarkedDiscard)
+ );
+ }
+
+ #[test]
+ fn test_bad_signature_fails_to_init() {
+ let kp0 = Keypair::new();
+ let kp1 = Keypair::new();
+
+ let mut tx0 = transfer(&kp0, &kp0.pubkey(), 100, Hash::default());
+ let tx1 = transfer(&kp1, &kp0.pubkey(), 100, Hash::default());
+ tx0.signatures = tx1.signatures;
+
+ assert_eq!(
+ ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None
+ ),
+ Err(DeserializedBundleError::SignatureVerificationFailure)
+ );
+ }
+
+ #[test]
+ fn test_vote_only_bank_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let parent = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+ let vote_only_bank = Arc::new(Bank::new_from_parent_with_options(
+ parent,
+ &Pubkey::new_unique(),
+ 1,
+ NewBankOptions {
+ vote_only_bank: true,
+ },
+ ));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(
+ &vote_only_bank,
+ &HashSet::default(),
+ &mut transaction_errors
+ ),
+ Err(DeserializedBundleError::VoteOnlyMode)
+ );
+ }
+
+ #[test]
+ fn test_duplicate_signature_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![
+ Packet::from_data(None, &tx0).unwrap(),
+ Packet::from_data(None, &tx0).unwrap(),
+ ]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::DuplicateTransaction)
+ );
+ }
+
+ #[test]
+ fn test_blacklisted_account_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(
+ &bank,
+ &HashSet::from([kp.pubkey()]),
+ &mut transaction_errors
+ ),
+ Err(DeserializedBundleError::BlacklistedAccount)
+ );
+ }
+
+ #[test]
+ fn test_already_processed_tx_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, genesis_config.hash());
+
+ bank.process_transaction(&tx0).unwrap();
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::FailedCheckTransactions)
+ );
+ }
+
+ #[test]
+ fn test_bad_blockhash_fails_to_build() {
+ let GenesisConfigInfo {
+ genesis_config,
+ mint_keypair,
+ ..
+ } = create_genesis_config(10_000);
+ let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
+
+ let kp = Keypair::new();
+
+ let tx0 = transfer(&mint_keypair, &kp.pubkey(), 500, Hash::default());
+
+ let bundle = ImmutableDeserializedBundle::new(
+ &mut PacketBundle {
+ batch: PacketBatch::new(vec![Packet::from_data(None, tx0).unwrap()]),
+ bundle_id: String::default(),
+ },
+ None,
+ )
+ .unwrap();
+
+ let mut transaction_errors = TransactionErrorMetrics::default();
+ assert_matches!(
+ bundle.build_sanitized_bundle(&bank, &HashSet::default(), &mut transaction_errors),
+ Err(DeserializedBundleError::FailedCheckTransactions)
+ );
+ }
+}
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 99ac98b5d422cc..79d039d3f1d6b1 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -12,22 +12,27 @@ pub mod accounts_hash_verifier;
pub mod admin_rpc_post_init;
pub mod banking_stage;
pub mod banking_trace;
+pub mod bundle_stage;
pub mod cache_block_meta_service;
pub mod cluster_info_vote_listener;
pub mod cluster_slots_service;
pub mod commitment_service;
pub mod completed_data_sets_service;
pub mod consensus;
+pub mod consensus_cache_updater;
pub mod cost_update_service;
pub mod drop_bank_service;
pub mod fetch_stage;
pub mod gen_keys;
+pub mod immutable_deserialized_bundle;
pub mod ledger_cleanup_service;
pub mod ledger_metric_report_service;
pub mod next_leader;
pub mod optimistic_confirmation_verifier;
+pub mod packet_bundle;
pub mod poh_timing_report_service;
pub mod poh_timing_reporter;
+pub mod proxy;
pub mod repair;
pub mod replay_stage;
mod result;
@@ -40,6 +45,7 @@ pub mod snapshot_packager_service;
pub mod staked_nodes_updater_service;
pub mod stats_reporter_service;
pub mod system_monitor_service;
+pub mod tip_manager;
pub mod tpu;
mod tpu_entry_notifier;
pub mod tracer_packet_stats;
@@ -70,3 +76,41 @@ extern crate solana_frozen_abi_macro;
#[cfg(test)]
#[macro_use]
extern crate assert_matches;
+
+use {
+ solana_sdk::packet::{Meta, Packet, PacketFlags, PACKET_DATA_SIZE},
+ std::{
+ cmp::min,
+ net::{IpAddr, Ipv4Addr},
+ },
+};
+
+const UNKNOWN_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
+
+// NOTE: last profiled at around 180ns
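+/// Converts a protobuf packet into a solana `Packet`. The payload copy is
+/// capped at PACKET_DATA_SIZE; meta fields are carried over when present.
+/// Illustrative sketch (`proto_packet` is an assumed, already-deserialized input):
+/// ```ignore
+/// let packet = proto_packet_to_packet(proto_packet);
+/// ```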
+pub fn proto_packet_to_packet(p: jito_protos::proto::packet::Packet) -> Packet {
+ let mut data = [0; PACKET_DATA_SIZE];
+ let copy_len = min(data.len(), p.data.len());
+ data[..copy_len].copy_from_slice(&p.data[..copy_len]);
+ let mut packet = Packet::new(data, Meta::default());
+ if let Some(meta) = p.meta {
+ packet.meta_mut().size = meta.size as usize;
+ packet.meta_mut().addr = meta.addr.parse().unwrap_or(UNKNOWN_IP);
+ packet.meta_mut().port = meta.port as u16;
+ if let Some(flags) = meta.flags {
+ if flags.simple_vote_tx {
+ packet.meta_mut().flags.insert(PacketFlags::SIMPLE_VOTE_TX);
+ }
+ if flags.forwarded {
+ packet.meta_mut().flags.insert(PacketFlags::FORWARDED);
+ }
+ if flags.tracer_packet {
+ packet.meta_mut().flags.insert(PacketFlags::TRACER_PACKET);
+ }
+ if flags.repair {
+ packet.meta_mut().flags.insert(PacketFlags::REPAIR);
+ }
+ }
+ }
+ packet
+}
diff --git a/core/src/packet_bundle.rs b/core/src/packet_bundle.rs
new file mode 100644
index 00000000000000..2158f374145af1
--- /dev/null
+++ b/core/src/packet_bundle.rs
@@ -0,0 +1,7 @@
+use solana_perf::packet::PacketBatch;
+
+#[derive(Clone, Debug)]
+pub struct PacketBundle {
+ pub batch: PacketBatch,
+ pub bundle_id: String,
+}
diff --git a/core/src/proxy/auth.rs b/core/src/proxy/auth.rs
new file mode 100644
index 00000000000000..39821e12ef13eb
--- /dev/null
+++ b/core/src/proxy/auth.rs
@@ -0,0 +1,185 @@
+use {
+ crate::proxy::ProxyError,
+ chrono::Utc,
+ jito_protos::proto::auth::{
+ auth_service_client::AuthServiceClient, GenerateAuthChallengeRequest,
+ GenerateAuthTokensRequest, RefreshAccessTokenRequest, Role, Token,
+ },
+ solana_gossip::cluster_info::ClusterInfo,
+ solana_sdk::signature::{Keypair, Signer},
+ std::{
+ sync::{Arc, Mutex},
+ time::Duration,
+ },
+ tokio::time::timeout,
+ tonic::{service::Interceptor, transport::Channel, Code, Request, Status},
+};
+
+/// Interceptor responsible for adding the access token to request headers.
+pub(crate) struct AuthInterceptor {
+ /// The token added to each request header.
+ access_token: Arc<Mutex<Token>>,
+}
+
+impl AuthInterceptor {
+ pub(crate) fn new(access_token: Arc<Mutex<Token>>) -> Self {
+ Self { access_token }
+ }
+}
+
+impl Interceptor for AuthInterceptor {
+ fn call(&mut self, mut request: Request<()>) -> Result<Request<()>, Status> {
+ request.metadata_mut().insert(
+ "authorization",
+ format!("Bearer {}", self.access_token.lock().unwrap().value)
+ .parse()
+ .unwrap(),
+ );
+
+ Ok(request)
+ }
+}
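+
+// Illustrative sketch of how a tonic-generated client would attach this
+// interceptor ("SomeServiceClient" is a placeholder name, not an actual client
+// in this crate):
+//
+// let client = SomeServiceClient::with_interceptor(
+// channel,
+// AuthInterceptor::new(access_token.clone()),
+// );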
+
+/// Generates an auth challenge then generates and returns validated auth tokens.
+pub async fn generate_auth_tokens(
+ auth_service_client: &mut AuthServiceClient<Channel>,
+ // used to sign challenges
+ keypair: &Keypair,
+) -> crate::proxy::Result<(
+ Token, /* access_token */
+ Token, /* refresh_token */
+)> {
+ debug!("generate_auth_challenge");
+ let challenge_response = auth_service_client
+ .generate_auth_challenge(GenerateAuthChallengeRequest {
+ role: Role::Validator as i32,
+ pubkey: keypair.pubkey().as_ref().to_vec(),
+ })
+ .await
+ .map_err(|e: Status| {
+ if e.code() == Code::PermissionDenied {
+ ProxyError::AuthenticationPermissionDenied
+ } else {
+ ProxyError::AuthenticationError(e.to_string())
+ }
+ })?;
+
+ let formatted_challenge = format!(
+ "{}-{}",
+ keypair.pubkey(),
+ challenge_response.into_inner().challenge
+ );
+
+ let signed_challenge = keypair
+ .sign_message(formatted_challenge.as_bytes())
+ .as_ref()
+ .to_vec();
+
+ debug!(
+ "formatted_challenge: {} signed_challenge: {:?}",
+ formatted_challenge, signed_challenge
+ );
+
+ debug!("generate_auth_tokens");
+ let auth_tokens = auth_service_client
+ .generate_auth_tokens(GenerateAuthTokensRequest {
+ challenge: formatted_challenge,
+ client_pubkey: keypair.pubkey().as_ref().to_vec(),
+ signed_challenge,
+ })
+ .await
+ .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?;
+
+ let inner = auth_tokens.into_inner();
+ let access_token = get_validated_token(inner.access_token)?;
+ let refresh_token = get_validated_token(inner.refresh_token)?;
+
+ Ok((access_token, refresh_token))
+}
+
+/// Tries to refresh the access token or run full-reauth if needed.
+pub async fn maybe_refresh_auth_tokens(
+ auth_service_client: &mut AuthServiceClient<Channel>