From d1e4c7c38b29233122da15826f7c19b7811f2211 Mon Sep 17 00:00:00 2001 From: tamirms Date: Wed, 20 Dec 2023 09:30:12 +0000 Subject: [PATCH 01/21] Remove captive core info request error logs (#5145) --- ingest/ledgerbackend/captive_core_backend.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/ingest/ledgerbackend/captive_core_backend.go b/ingest/ledgerbackend/captive_core_backend.go index aa9414b5d1..285247bffa 100644 --- a/ingest/ledgerbackend/captive_core_backend.go +++ b/ingest/ledgerbackend/captive_core_backend.go @@ -218,7 +218,6 @@ func (c *CaptiveStellarCore) coreSyncedMetric() float64 { info, err := c.stellarCoreClient.Info(c.config.Context) if err != nil { - c.config.Log.WithError(err).Warn("Cannot connect to Captive Stellar-Core HTTP server") return -1 } @@ -236,7 +235,6 @@ func (c *CaptiveStellarCore) coreVersionMetric() float64 { info, err := c.stellarCoreClient.Info(c.config.Context) if err != nil { - c.config.Log.WithError(err).Warn("Cannot connect to Captive Stellar-Core HTTP server") return -1 } From bd8533a96d3e094179ba1dd13766cf075f9ef0a2 Mon Sep 17 00:00:00 2001 From: tamirms Date: Fri, 5 Jan 2024 19:01:41 +0100 Subject: [PATCH 02/21] Fix captive core toml history entries (#5150) --- ingest/ledgerbackend/toml.go | 2 +- ingest/ledgerbackend/toml_test.go | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/ingest/ledgerbackend/toml.go b/ingest/ledgerbackend/toml.go index 7c42bc11c8..edbb137b8e 100644 --- a/ingest/ledgerbackend/toml.go +++ b/ingest/ledgerbackend/toml.go @@ -558,7 +558,7 @@ func (c *CaptiveCoreToml) setDefaults(params CaptiveCoreTomlParams) { for i, val := range params.HistoryArchiveURLs { name := fmt.Sprintf("HISTORY.h%d", i) c.HistoryEntries[c.tablePlaceholders.newPlaceholder(name)] = History{ - Get: fmt.Sprintf("curl -sf %s/{0} -o {1}", val), + Get: fmt.Sprintf("curl -sf %s/{0} -o {1}", strings.TrimSuffix(val, "/")), } } } diff --git a/ingest/ledgerbackend/toml_test.go b/ingest/ledgerbackend/toml_test.go index 476a2ea953..c5d40c77e3 100644 --- a/ingest/ledgerbackend/toml_test.go +++ b/ingest/ledgerbackend/toml_test.go @@ -395,6 +395,28 @@ func TestGenerateConfig(t *testing.T) { } } +func TestHistoryArchiveURLTrailingSlash(t *testing.T) { + httpPort := uint(8000) + peerPort := uint(8000) + logPath := "logPath" + + params := CaptiveCoreTomlParams{ + NetworkPassphrase: "Public Global Stellar Network ; September 2015", + HistoryArchiveURLs: []string{"http://localhost:1170/"}, + HTTPPort: &httpPort, + PeerPort: &peerPort, + LogPath: &logPath, + Strict: false, + } + + captiveCoreToml, err := NewCaptiveCoreToml(params) + assert.NoError(t, err) + assert.Len(t, captiveCoreToml.HistoryEntries, 1) + for _, entry := range captiveCoreToml.HistoryEntries { + assert.Equal(t, "curl -sf http://localhost:1170/{0} -o {1}", entry.Get) + } +} + func TestExternalStorageConfigUsesDatabaseToml(t *testing.T) { var err error var captiveCoreToml *CaptiveCoreToml From 495d18cb0d987ca119ade4b3174d6deb40a273ee Mon Sep 17 00:00:00 2001 From: shawn Date: Mon, 8 Jan 2024 13:56:51 -0800 Subject: [PATCH 03/21] #5152: changed the 'Processed ledger' log output from streamLedger to a different phrase to avoid conflict with the existing 'Processed ledger' log output from fsm (#5155) --- services/horizon/internal/ingest/processor_runner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/horizon/internal/ingest/processor_runner.go b/services/horizon/internal/ingest/processor_runner.go index ed066a20d2..34b977c03e 100644 ---
a/services/horizon/internal/ingest/processor_runner.go +++ b/services/horizon/internal/ingest/processor_runner.go @@ -353,7 +353,7 @@ func (s *ProcessorRunner) streamLedger(ledger xdr.LedgerCloseMeta, "ledger": true, "commit": false, "duration": time.Since(startTime).Seconds(), - }).Info("Processed ledger") + }).Info("Transaction processors finished for ledger") return nil } From 428a0be23fa3ca2d45043d1671eeddc93e2ccf20 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 11 Jan 2024 13:06:07 -0800 Subject: [PATCH 04/21] services/horizon/ingest: removed legacy core cursor update during ledger ingestion (#5158) --- services/horizon/CHANGELOG.md | 5 +- services/horizon/cmd/db.go | 2 - services/horizon/cmd/ingest.go | 3 - services/horizon/internal/config.go | 9 -- services/horizon/internal/flags.go | 28 +++--- services/horizon/internal/flags_test.go | 70 ++++++++++++++ .../internal/ingest/build_state_test.go | 32 ------- .../internal/ingest/db_integration_test.go | 1 - services/horizon/internal/ingest/fsm.go | 19 ---- services/horizon/internal/ingest/main.go | 91 ++++--------------- services/horizon/internal/ingest/main_test.go | 1 - services/horizon/internal/ingest/parallel.go | 3 - .../internal/ingest/resume_state_test.go | 68 -------------- services/horizon/internal/init.go | 4 - .../internal/integration/parameters_test.go | 78 ---------------- services/horizon/internal/test/db/main.go | 6 ++ services/horizon/internal/test/main.go | 11 ++- services/horizon/internal/test/t.go | 14 +-- 18 files changed, 124 insertions(+), 321 deletions(-) diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index c3c5705464..962fca7128 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -8,7 +8,10 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). ### Added - Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051)) -- Deprecate configuration flags related to legacy non-captive core ingestion ([5100](https://github.com/stellar/go/pull/5100)) + +### Breaking Changes +- Removed configuration flags `--stellar-core-db-url`, `--cursor-name`, and `--skip-cursor-update`; they were related to legacy non-captive core ingestion and are no longer usable.
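Deployments that still pass the removed flags must switch to the captive core options instead. A minimal sketch of the replacement wiring, using only `ledgerbackend` constructors and fields that appear elsewhere in this patch series; the binary path and archive URL are placeholder assumptions:

    package main

    import (
        "github.com/stellar/go/ingest/ledgerbackend"
    )

    func newLedgerBackend() (*ledgerbackend.CaptiveStellarCore, error) {
        passphrase := "Public Global Stellar Network ; September 2015"
        archives := []string{"http://localhost:1170"} // placeholder archive URL

        // Captive core derives its stellar-core configuration from a generated toml.
        toml, err := ledgerbackend.NewCaptiveCoreToml(ledgerbackend.CaptiveCoreTomlParams{
            NetworkPassphrase:  passphrase,
            HistoryArchiveURLs: archives,
        })
        if err != nil {
            return nil, err
        }
        return ledgerbackend.NewCaptive(ledgerbackend.CaptiveCoreConfig{
            BinaryPath:         "/usr/bin/stellar-core", // placeholder path
            Toml:               toml,
            NetworkPassphrase:  passphrase,
            HistoryArchiveURLs: archives,
        })
    }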
+ ## 2.27.0 ### Fixed diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index a83597932e..a0d0e6c518 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -413,10 +413,8 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, ReingestRetryBackoffSeconds: int(retryBackoffSeconds), CaptiveCoreBinaryPath: config.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: config.CaptiveCoreConfigUseDB, - RemoteCaptiveCoreURL: config.RemoteCaptiveCoreURL, CaptiveCoreToml: config.CaptiveCoreToml, CaptiveCoreStoragePath: config.CaptiveCoreStoragePath, - StellarCoreCursor: config.CursorName, StellarCoreURL: config.StellarCoreURL, RoundingSlippageFilter: config.RoundingSlippageFilter, EnableIngestionFiltering: config.EnableIngestionFiltering, diff --git a/services/horizon/cmd/ingest.go b/services/horizon/cmd/ingest.go index e2d38977ab..3833dba7fd 100644 --- a/services/horizon/cmd/ingest.go +++ b/services/horizon/cmd/ingest.go @@ -130,7 +130,6 @@ var ingestVerifyRangeCmd = &cobra.Command{ HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, - RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL, CheckpointFrequency: globalConfig.CheckpointFrequency, CaptiveCoreToml: globalConfig.CaptiveCoreToml, CaptiveCoreStoragePath: globalConfig.CaptiveCoreStoragePath, @@ -213,7 +212,6 @@ var ingestStressTestCmd = &cobra.Command{ HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, RoundingSlippageFilter: globalConfig.RoundingSlippageFilter, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, - RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, } @@ -353,7 +351,6 @@ var ingestBuildStateCmd = &cobra.Command{ HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, - RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL, CheckpointFrequency: globalConfig.CheckpointFrequency, CaptiveCoreToml: globalConfig.CaptiveCoreToml, CaptiveCoreStoragePath: globalConfig.CaptiveCoreStoragePath, diff --git a/services/horizon/internal/config.go b/services/horizon/internal/config.go index 1cc14b4900..7454f52bb7 100644 --- a/services/horizon/internal/config.go +++ b/services/horizon/internal/config.go @@ -21,7 +21,6 @@ type Config struct { EnableIngestionFiltering bool CaptiveCoreBinaryPath string - RemoteCaptiveCoreURL string CaptiveCoreConfigPath string CaptiveCoreTomlParams ledgerbackend.CaptiveCoreTomlParams CaptiveCoreToml *ledgerbackend.CaptiveCoreToml @@ -68,11 +67,6 @@ type Config struct { TLSKey string // Ingest toggles whether this horizon instance should run the data ingestion subsystem. Ingest bool - // CursorName is the cursor used for ingesting from stellar-core. - // Setting multiple cursors in different Horizon instances allows multiple - // Horizons to ingest from the same stellar-core instance without cursor - // collisions. - CursorName string // HistoryRetentionCount represents the minimum number of ledgers worth of // history data to retain in the horizon database. For the purposes of // determining a "retention duration", each ledger roughly corresponds to 10 @@ -82,9 +76,6 @@ type Config struct { // out-of-date by before horizon begins to respond with an error to history // requests. 
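// For example (illustrative numbers, not defaults): with a StaleThreshold of
// 6 and ledgers closing roughly every 10 seconds, history endpoints start
// erroring once horizon falls about a minute behind the network.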
StaleThreshold uint - // SkipCursorUpdate causes the ingestor to skip reporting the "last imported - // ledger" state to stellar-core. - SkipCursorUpdate bool // IngestDisableStateVerification disables state verification // `System.verifyState()` when set to `true`. IngestDisableStateVerification bool diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go index e2783680fd..40bfc08afe 100644 --- a/services/horizon/internal/flags.go +++ b/services/horizon/internal/flags.go @@ -338,11 +338,7 @@ func Flags() (*Config, support.ConfigOptions) { Hidden: true, CustomSetValue: func(opt *support.ConfigOption) error { if val := viper.GetString(opt.Name); val != "" { - stdLog.Printf( - "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. " + - "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in " + - "the future.", - ) + return fmt.Errorf("flag --stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion") } return nil }, @@ -595,11 +591,15 @@ func Flags() (*Config, support.ConfigOptions) { &support.ConfigOption{ Name: "cursor-name", EnvVar: "CURSOR_NAME", - ConfigKey: &config.CursorName, OptType: types.String, - FlagDefault: "HORIZON", - Usage: "ingestor cursor used by horizon to ingest from stellar core. must be uppercase and unique for each horizon instance ingesting from that core instance.", + Hidden: true, UsedInCommands: IngestionCommands, + CustomSetValue: func(opt *support.ConfigOption) error { + if val := viper.GetString(opt.Name); val != "" { + return fmt.Errorf("flag --cursor-name has been removed and no longer valid, must use captive core configuration for ingestion") + } + return nil + }, }, &support.ConfigOption{ Name: "history-retention-count", @@ -619,11 +619,15 @@ func Flags() (*Config, support.ConfigOptions) { }, &support.ConfigOption{ Name: "skip-cursor-update", - ConfigKey: &config.SkipCursorUpdate, - OptType: types.Bool, - FlagDefault: false, - Usage: "causes the ingester to skip reporting the last imported ledger state to stellar-core", + OptType: types.String, + Hidden: true, UsedInCommands: IngestionCommands, + CustomSetValue: func(opt *support.ConfigOption) error { + if val := viper.GetString(opt.Name); val != "" { + return fmt.Errorf("flag --skip-cursor-update has been removed and no longer valid, must use captive core configuration for ingestion") + } + return nil + }, }, &support.ConfigOption{ Name: "ingest-disable-state-verification", diff --git a/services/horizon/internal/flags_test.go b/services/horizon/internal/flags_test.go index b2e617bc00..ef2d5d3a02 100644 --- a/services/horizon/internal/flags_test.go +++ b/services/horizon/internal/flags_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/stellar/go/services/horizon/internal/test" "github.com/stretchr/testify/assert" @@ -259,3 +260,72 @@ func TestEnvironmentVariables(t *testing.T) { assert.Equal(t, config.CaptiveCoreConfigPath, "../docker/captive-core-classic-integration-tests.cfg") assert.Equal(t, config.CaptiveCoreConfigUseDB, true) } + +func TestRemovedFlags(t *testing.T) { + tests := []struct { + name string + environmentVars map[string]string + errStr string + cmdArgs []string + }{ + { + name: "STELLAR_CORE_DATABASE_URL removed", + environmentVars: map[string]string{ + "INGEST": "false", + "STELLAR_CORE_DATABASE_URL": "coredb", + "DATABASE_URL": "dburl", + }, + errStr: "flag 
--stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion", + }, + { + name: "--stellar-core-db-url removed", + environmentVars: map[string]string{ + "INGEST": "false", + "DATABASE_URL": "dburl", + }, + errStr: "flag --stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion", + cmdArgs: []string{"--stellar-core-db-url=coredb"}, + }, + { + name: "CURSOR_NAME removed", + environmentVars: map[string]string{ + "INGEST": "false", + "CURSOR_NAME": "cursor", + "DATABASE_URL": "dburl", + }, + errStr: "flag --cursor-name has been removed and no longer valid, must use captive core configuration for ingestion", + }, + { + name: "SKIP_CURSOR_UPDATE removed", + environmentVars: map[string]string{ + "INGEST": "false", + "SKIP_CURSOR_UPDATE": "true", + "DATABASE_URL": "dburl", + }, + errStr: "flag --skip-cursor-update has been removed and no longer valid, must use captive core configuration for ingestion", + }, + } + + envManager := test.NewEnvironmentManager() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + envManager.Restore() + }() + err := envManager.InitializeEnvironmentVariables(tt.environmentVars) + require.NoError(t, err) + + config, flags := Flags() + testCmd := &cobra.Command{ + Use: "test", + } + + require.NoError(t, flags.Init(testCmd)) + require.NoError(t, testCmd.ParseFlags(tt.cmdArgs)) + + err = ApplyFlags(config, flags, ApplyOptions{}) + require.Error(t, err) + assert.Equal(t, tt.errStr, err.Error()) + }) + } +} diff --git a/services/horizon/internal/ingest/build_state_test.go b/services/horizon/internal/ingest/build_state_test.go index 7e03818795..d1409182d9 100644 --- a/services/horizon/internal/ingest/build_state_test.go +++ b/services/horizon/internal/ingest/build_state_test.go @@ -10,7 +10,6 @@ import ( "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/support/errors" "github.com/stellar/go/xdr" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" ) @@ -83,12 +82,6 @@ func (s *BuildStateTestSuite) mockCommonHistoryQ() { s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once() s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(62), - ).Return(nil).Once() } func (s *BuildStateTestSuite) TestCheckPointLedgerIsZero() { @@ -175,12 +168,6 @@ func (s *BuildStateTestSuite) TestUpdateLastLedgerIngestReturnsError() { s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(errors.New("my error")).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(62), - ).Return(nil).Once() next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) @@ -194,12 +181,6 @@ func (s *BuildStateTestSuite) TestUpdateExpStateInvalidReturnsError() { s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once() s.historyQ.On("UpdateExpStateInvalid", s.ctx, 
false).Return(errors.New("my error")).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(62), - ).Return(nil).Once() next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) @@ -215,13 +196,6 @@ func (s *BuildStateTestSuite) TestTruncateIngestStateTablesReturnsError() { s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(errors.New("my error")).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(62), - ).Return(nil).Once() - next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) s.Assert().Error(err) @@ -251,12 +225,6 @@ func (s *BuildStateTestSuite) TestRunHistoryArchiveIngestionGenesisReturnsError( s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(0)).Return(nil).Once() s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(0), - ).Return(nil).Once() s.runner. On("RunGenesisStateIngestion"). diff --git a/services/horizon/internal/ingest/db_integration_test.go b/services/horizon/internal/ingest/db_integration_test.go index 86576db137..60a45f158e 100644 --- a/services/horizon/internal/ingest/db_integration_test.go +++ b/services/horizon/internal/ingest/db_integration_test.go @@ -81,7 +81,6 @@ func (s *DBTestSuite) SetupTest() { s.historyAdapter = &mockHistoryArchiveAdapter{} var err error sIface, err := NewSystem(Config{ - CoreSession: s.tt.CoreSession(), HistorySession: s.tt.HorizonSession(), HistoryArchiveURLs: []string{"http://ignore.test"}, DisableStateVerification: false, diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go index f5b4f94456..3cc6d31c7d 100644 --- a/services/horizon/internal/ingest/fsm.go +++ b/services/horizon/internal/ingest/fsm.go @@ -326,11 +326,6 @@ func (b buildState) run(s *system) (transition, error) { return nextFailState, nil } - if err = s.updateCursor(b.checkpointLedger - 1); err != nil { - // Don't return updateCursor error. - log.WithError(err).Warn("error updating stellar-core cursor") - } - log.Info("Starting ingestion system from empty state...") // Clear last_ingested_ledger in key value store @@ -454,14 +449,6 @@ func (r resumeState) run(s *system) (transition, error) { WithField("lastIngestedLedger", lastIngestedLedger). Info("bumping ingest ledger to next ledger after ingested ledger in db") - // Update cursor if there's more than one ingesting instance: either - // Captive-Core or DB ingestion connected to another Stellar-Core. - // remove now? - if err = s.updateCursor(lastIngestedLedger); err != nil { - // Don't return updateCursor error. - log.WithError(err).Warn("error updating stellar-core cursor") - } - // resume immediately so Captive-Core catchup is not slowed down return resumeImmediately(lastIngestedLedger), nil } @@ -522,12 +509,6 @@ func (r resumeState) run(s *system) (transition, error) { return retryResume(r), err } - //TODO remove now? stellar-core-db-url is removed - if err = s.updateCursor(ingestLedger); err != nil { - // Don't return updateCursor error. 
- log.WithError(err).Warn("error updating stellar-core cursor") - } - duration = time.Since(startTime).Seconds() s.Metrics().LedgerIngestionDuration.Observe(float64(duration)) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 2cf067441e..13f7017cbf 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -79,14 +79,11 @@ const ( var log = logpkg.DefaultLogger.WithField("service", "ingest") type Config struct { - CoreSession db.SessionInterface StellarCoreURL string - StellarCoreCursor string CaptiveCoreBinaryPath string CaptiveCoreStoragePath string CaptiveCoreToml *ledgerbackend.CaptiveCoreToml CaptiveCoreConfigUseDB bool - RemoteCaptiveCoreURL string NetworkPassphrase string HistorySession db.SessionInterface @@ -111,19 +108,6 @@ type Config struct { MaxLedgerPerFlush uint32 } -// LocalCaptiveCoreEnabled returns true if configured to run -// a local captive core instance for ingestion. -func (c Config) LocalCaptiveCoreEnabled() bool { - // c.RemoteCaptiveCoreURL is always empty when running local captive core. - return c.RemoteCaptiveCoreURL == "" -} - -// RemoteCaptiveCoreEnabled returns true if configured to run -// a remote captive core instance for ingestion. -func (c Config) RemoteCaptiveCoreEnabled() bool { - return c.RemoteCaptiveCoreURL != "" -} - const ( getLastIngestedErrMsg string = "Error getting last ingested ledger" getIngestVersionErrMsg string = "Error getting ingestion version" @@ -245,41 +229,26 @@ func NewSystem(config Config) (System, error) { return nil, errors.Wrap(err, "error creating history archive") } - var ledgerBackend ledgerbackend.LedgerBackend - if config.RemoteCaptiveCoreEnabled() { - ledgerBackend, err = ledgerbackend.NewRemoteCaptive(config.RemoteCaptiveCoreURL) - if err != nil { - cancel() - return nil, errors.Wrap(err, "error creating captive core backend") - } - } else if config.LocalCaptiveCoreEnabled() { - logger := log.WithField("subservice", "stellar-core") - ledgerBackend, err = ledgerbackend.NewCaptive( - ledgerbackend.CaptiveCoreConfig{ - BinaryPath: config.CaptiveCoreBinaryPath, - StoragePath: config.CaptiveCoreStoragePath, - UseDB: config.CaptiveCoreConfigUseDB, - Toml: config.CaptiveCoreToml, - NetworkPassphrase: config.NetworkPassphrase, - HistoryArchiveURLs: config.HistoryArchiveURLs, - CheckpointFrequency: config.CheckpointFrequency, - LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession), - Log: logger, - Context: ctx, - UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()), - }, - ) - if err != nil { - cancel() - return nil, errors.Wrap(err, "error creating captive core backend") - } - } else { - coreSession := config.CoreSession.Clone() - ledgerBackend, err = ledgerbackend.NewDatabaseBackendFromSession(coreSession, config.NetworkPassphrase) - if err != nil { - cancel() - return nil, errors.Wrap(err, "error creating ledger backend") - } + // the only ingest option is local captive core config + logger := log.WithField("subservice", "stellar-core") + ledgerBackend, err := ledgerbackend.NewCaptive( + ledgerbackend.CaptiveCoreConfig{ + BinaryPath: config.CaptiveCoreBinaryPath, + StoragePath: config.CaptiveCoreStoragePath, + UseDB: config.CaptiveCoreConfigUseDB, + Toml: config.CaptiveCoreToml, + NetworkPassphrase: config.NetworkPassphrase, + HistoryArchiveURLs: config.HistoryArchiveURLs, + CheckpointFrequency: config.CheckpointFrequency, + LedgerHashStore: 
ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession), + Log: logger, + Context: ctx, + UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()), + }, + ) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating captive core backend") } historyQ := &history.Q{config.HistorySession.Clone()} @@ -752,26 +721,6 @@ func (s *system) resetStateVerificationErrors() { s.stateVerificationErrors = 0 } -func (s *system) updateCursor(ledgerSequence uint32) error { - if s.stellarCoreClient == nil { - return nil - } - - cursor := defaultCoreCursorName - if s.config.StellarCoreCursor != "" { - cursor = s.config.StellarCoreCursor - } - - ctx, cancel := context.WithTimeout(s.ctx, time.Second) - defer cancel() - err := s.stellarCoreClient.SetCursor(ctx, cursor, int32(ledgerSequence)) - if err != nil { - return errors.Wrap(err, "Setting stellar-core cursor failed") - } - - return nil -} - func (s *system) Shutdown() { log.Info("Shutting down ingestion system...") s.stateVerificationMutex.Lock() diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go index 55860eeaff..460c27e062 100644 --- a/services/horizon/internal/ingest/main_test.go +++ b/services/horizon/internal/ingest/main_test.go @@ -90,7 +90,6 @@ func TestLedgerEligibleForStateVerification(t *testing.T) { func TestNewSystem(t *testing.T) { config := Config{ - CoreSession: &db.Session{DB: &sqlx.DB{}}, HistorySession: &db.Session{DB: &sqlx.DB{}}, DisableStateVerification: true, HistoryArchiveURLs: []string{"https://history.stellar.org/prd/core-live/core_live_001"}, diff --git a/services/horizon/internal/ingest/parallel.go b/services/horizon/internal/ingest/parallel.go index b3c163689d..525f153b81 100644 --- a/services/horizon/internal/ingest/parallel.go +++ b/services/horizon/internal/ingest/parallel.go @@ -52,9 +52,6 @@ func (ps *ParallelSystems) Shutdown() { if ps.config.HistorySession != nil { ps.config.HistorySession.Close() } - if ps.config.CoreSession != nil { - ps.config.CoreSession.Close() - } } func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, reingestJobQueue <-chan history.LedgerRange) rangeError { diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go index 82a7869d4b..013f176ae8 100644 --- a/services/horizon/internal/ingest/resume_state_test.go +++ b/services/horizon/internal/ingest/resume_state_test.go @@ -273,14 +273,6 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() { s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() s.historyQ.On("Commit").Return(nil).Once() s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once() - - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(101), - ).Return(nil).Once() - s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() } func (s *ResumeTestTestSuite) TestBumpIngestLedger() { @@ -303,13 +295,6 @@ func (s *ResumeTestTestSuite) TestBumpIngestLedger() { s.historyQ.On("Begin", s.ctx).Return(nil).Once() s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(101), nil).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(101), - ).Return(errors.New("my error")).Once() - next, err := resumeState{latestSuccessfullyProcessedLedger: 99}.run(s.system) 
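// With ledger 101 already ingested, resuming from 99 must still succeed now
// that the legacy SetCursor call is gone: the state machine simply bumps to
// the ledger after the last ingested one and retries immediately.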
s.Assert().NoError(err) s.Assert().Equal( @@ -335,45 +320,6 @@ func (s *ResumeTestTestSuite) TestIngestAllMasterNode() { ) } -func (s *ResumeTestTestSuite) TestErrorSettingCursorIgnored() { - s.historyQ.On("Begin", s.ctx).Return(nil).Once() - s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() - s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() - s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil) - - s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")). - Run(func(args mock.Arguments) { - meta := args.Get(0).(xdr.LedgerCloseMeta) - s.Assert().Equal(uint32(101), meta.LedgerSequence()) - }). - Return( - ledgerStats{}, - nil, - ).Once() - s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() - s.historyQ.On("Commit").Return(nil).Once() - - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(101), - ).Return(errors.New("my error")).Once() - - s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() - s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once() - - next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) - s.Assert().NoError(err) - s.Assert().Equal( - transition{ - node: resumeState{latestSuccessfullyProcessedLedger: 101}, - sleepDuration: 0, - }, - next, - ) -} - func (s *ResumeTestTestSuite) TestRebuildTradeAggregationBucketsError() { s.historyQ.On("Begin", s.ctx).Return(nil).Once() s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() @@ -422,13 +368,6 @@ func (s *ResumeTestTestSuite) TestReapingObjectsDisabled() { s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() s.historyQ.On("Commit").Return(nil).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(101), - ).Return(nil).Once() - s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once() // Reap lookup tables not executed @@ -466,13 +405,6 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() { s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() s.historyQ.On("Commit").Return(nil).Once() - s.stellarCoreClient.On( - "SetCursor", - mock.AnythingOfType("*context.timerCtx"), - defaultCoreCursorName, - int32(101), - ).Return(nil).Once() - s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once() // Reap lookup tables: diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go index 5d38c86ccf..1b6664b8ba 100644 --- a/services/horizon/internal/init.go +++ b/services/horizon/internal/init.go @@ -91,9 +91,7 @@ func mustInitHorizonDB(app *App) { func initIngester(app *App) { var err error - var coreSession db.SessionInterface app.ingester, err = ingest.NewSystem(ingest.Config{ - CoreSession: coreSession, HistorySession: mustNewDBSession( db.IngestSubservice, app.config.DatabaseURL, ingest.MaxDBConnections, ingest.MaxDBConnections, app.prometheusRegistry, ), @@ -101,12 +99,10 @@ func initIngester(app *App) { HistoryArchiveURLs: app.config.HistoryArchiveURLs, CheckpointFrequency: app.config.CheckpointFrequency, StellarCoreURL: app.config.StellarCoreURL, - 
StellarCoreCursor: app.config.CursorName, CaptiveCoreBinaryPath: app.config.CaptiveCoreBinaryPath, CaptiveCoreStoragePath: app.config.CaptiveCoreStoragePath, CaptiveCoreConfigUseDB: app.config.CaptiveCoreConfigUseDB, CaptiveCoreToml: app.config.CaptiveCoreToml, - RemoteCaptiveCoreURL: app.config.RemoteCaptiveCoreURL, DisableStateVerification: app.config.IngestDisableStateVerification, StateVerificationCheckpointFrequency: uint32(app.config.IngestStateVerificationCheckpointFrequency), StateVerificationTimeout: app.config.IngestStateVerificationTimeout, diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index 97fab268bc..ebe3c3bfda 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -541,84 +541,6 @@ func TestDeprecatedOutputs(t *testing.T) { "Configuring section in the developer documentation on how to use them - "+ "https://developers.stellar.org/docs/run-api-server/configuring") }) - t.Run("deprecated output for --stellar-core-db-url and --enable-captive-core-ingestion", func(t *testing.T) { - originalStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - stdLog.SetOutput(os.Stderr) - - testConfig := integration.GetTestConfig() - testConfig.HorizonIngestParameters = map[string]string{ - "stellar-core-db-url": "temp-url", - "enable-captive-core-ingestion": "true", - } - test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() - assert.NoError(t, err) - test.WaitForHorizon() - - // Use a wait group to wait for the goroutine to finish before proceeding - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - if err := w.Close(); err != nil { - t.Errorf("Failed to close Stdout") - return - } - }() - - outputBytes, _ := io.ReadAll(r) - wg.Wait() // Wait for the goroutine to finish before proceeding - _ = r.Close() - os.Stderr = originalStderr - - assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. "+ - "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+ - "the future.") - assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --enable-captive-core-ingestion has been deprecated. "+ - "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+ - "the future.") - }) - t.Run("deprecated output for env vars STELLAR_CORE_DATABASE_URL and ENABLE_CAPTIVE_CORE_INGESTION", func(t *testing.T) { - originalStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - stdLog.SetOutput(os.Stderr) - - testConfig := integration.GetTestConfig() - testConfig.HorizonEnvironment = map[string]string{ - "STELLAR_CORE_DATABASE_URL": "temp-url", - "ENABLE_CAPTIVE_CORE_INGESTION": "true", - } - test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() - assert.NoError(t, err) - test.WaitForHorizon() - - // Use a wait group to wait for the goroutine to finish before proceeding - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - if err := w.Close(); err != nil { - t.Errorf("Failed to close Stdout") - return - } - }() - - outputBytes, _ := io.ReadAll(r) - wg.Wait() // Wait for the goroutine to finish before proceeding - _ = r.Close() - os.Stderr = originalStderr - - assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. 
"+ - "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+ - "the future.") - assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --enable-captive-core-ingestion has been deprecated. "+ - "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+ - "the future.") - }) } func TestGlobalFlagsOutput(t *testing.T) { diff --git a/services/horizon/internal/test/db/main.go b/services/horizon/internal/test/db/main.go index 4156ec25fb..6114a677ff 100644 --- a/services/horizon/internal/test/db/main.go +++ b/services/horizon/internal/test/db/main.go @@ -29,6 +29,8 @@ func horizonPostgres(t *testing.T) *db.DB { return horizonDB } +// TODO, remove refs to internal core db, need to remove scenario tests which require this +// to seed core db. func corePostgres(t *testing.T) *db.DB { if coreDB != nil { return coreDB @@ -60,6 +62,8 @@ func HorizonROURL() string { return horizonDB.RO_DSN } +// TODO, remove refs to core db, need to remove scenario tests which require this +// to seed core db. func StellarCore(t *testing.T) *sqlx.DB { if coreDBConn != nil { return coreDBConn @@ -68,6 +72,8 @@ func StellarCore(t *testing.T) *sqlx.DB { return coreDBConn } +// TODO, remove refs to core db, need to remove scenario tests which require this +// to seed core db. func StellarCoreURL() string { if coreDB == nil { log.Panic(fmt.Errorf("StellarCore not initialized")) diff --git a/services/horizon/internal/test/main.go b/services/horizon/internal/test/main.go index fea814b4c3..93ed4a94db 100644 --- a/services/horizon/internal/test/main.go +++ b/services/horizon/internal/test/main.go @@ -25,11 +25,12 @@ type StaticMockServer struct { // T provides a common set of functionality for each test in horizon type T struct { - T *testing.T - Assert *assert.Assertions - Require *require.Assertions - Ctx context.Context - HorizonDB *sqlx.DB + T *testing.T + Assert *assert.Assertions + Require *require.Assertions + Ctx context.Context + HorizonDB *sqlx.DB + //TODO - remove ref to core db once scenario tests are removed. CoreDB *sqlx.DB EndLogTest func() []logrus.Entry } diff --git a/services/horizon/internal/test/t.go b/services/horizon/internal/test/t.go index c2a75da986..2f86f70565 100644 --- a/services/horizon/internal/test/t.go +++ b/services/horizon/internal/test/t.go @@ -18,7 +18,7 @@ import ( "github.com/stellar/go/support/render/hal" ) -// CoreSession returns a db.Session instance pointing at the stellar core test database +// TODO - remove ref to core db once scenario tests are removed. 
func (t *T) CoreSession() *db.Session { return &db.Session{ DB: t.CoreDB, @@ -143,17 +143,7 @@ func (t *T) UnmarshalExtras(r io.Reader) map[string]string { func (t *T) LoadLedgerStatus() ledger.Status { var next ledger.Status - err := t.CoreSession().GetRaw(t.Ctx, &next, ` - SELECT - COALESCE(MAX(ledgerseq), 0) as core_latest - FROM ledgerheaders - `) - - if err != nil { - panic(err) - } - - err = t.HorizonSession().GetRaw(t.Ctx, &next, ` + err := t.HorizonSession().GetRaw(t.Ctx, &next, ` SELECT COALESCE(MIN(sequence), 0) as history_elder, COALESCE(MAX(sequence), 0) as history_latest From 15324b7060dd945da594609689b4969fa06940b3 Mon Sep 17 00:00:00 2001 From: shawn Date: Fri, 12 Jan 2024 09:04:31 -0800 Subject: [PATCH 05/21] #5156: do not include range prep time in 'Reingestion done' logged duration (#5159) --- .../internal/ingest/fsm_reingest_history_range_state.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go index 4e60f71cd1..e2e7724d68 100644 --- a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go +++ b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go @@ -124,13 +124,14 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) { h.fromLedger = 2 } - startTime := time.Now() + var startTime time.Time if h.force { if t, err := h.prepareRange(s); err != nil { return t, err } + startTime = time.Now() if err := s.historyQ.Begin(s.ctx); err != nil { return stop(), errors.Wrap(err, "Error starting a transaction") } @@ -167,6 +168,7 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) { return t, err } + startTime = time.Now() if err := s.historyQ.Begin(s.ctx); err != nil { return stop(), errors.Wrap(err, "Error starting a transaction") } From 34839104234f2d0a64f12a3d6752120c9b116c87 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 18 Jan 2024 12:30:14 -0800 Subject: [PATCH 06/21] http archive requests include user agent and metrics (#5166) --- historyarchive/archive.go | 66 +++++++++++++++++++ historyarchive/archive_pool.go | 18 +++-- historyarchive/archive_test.go | 25 +++++++ historyarchive/mocks.go | 29 ++++++++ ingest/ledgerbackend/captive_core_backend.go | 1 + .../captive_core_backend_test.go | 15 ++++- services/horizon/CHANGELOG.md | 4 +- services/horizon/internal/ingest/fsm.go | 28 ++++++++ .../ingest/history_archive_adapter.go | 5 ++ .../ingest/history_archive_adapter_test.go | 5 ++ services/horizon/internal/ingest/main.go | 12 ++++ .../internal/ingest/resume_state_test.go | 19 ++++++ 12 files changed, 219 insertions(+), 8 deletions(-) diff --git a/historyarchive/archive.go b/historyarchive/archive.go index 2d470a8026..13750c1fb4 100644 --- a/historyarchive/archive.go +++ b/historyarchive/archive.go @@ -17,6 +17,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" log "github.com/sirupsen/logrus" @@ -59,6 +60,51 @@ type Ledger struct { TransactionResult xdr.TransactionHistoryResultEntry } +// golang will auto wrap them back to 0 if they overflow after addition. 
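The wrap-around called out in the comment above is ordinary unsigned arithmetic on the sync/atomic counters; a self-contained illustration, separate from the patch itself:

    package main

    import (
        "fmt"
        "math"
        "sync/atomic"
    )

    func main() {
        var c atomic.Uint32
        c.Store(math.MaxUint32) // counter at its ceiling
        c.Add(1)                // overflow wraps modulo 2^32
        fmt.Println(c.Load())   // prints 0
    }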
+type archiveStats struct { + requests atomic.Uint32 + fileDownloads atomic.Uint32 + fileUploads atomic.Uint32 + backendName string +} + +type ArchiveStats interface { + GetRequests() uint32 + GetDownloads() uint32 + GetUploads() uint32 + GetBackendName() string +} + +func (as *archiveStats) incrementDownloads() { + as.fileDownloads.Add(1) + as.incrementRequests() +} + +func (as *archiveStats) incrementUploads() { + as.fileUploads.Add(1) + as.incrementRequests() +} + +func (as *archiveStats) incrementRequests() { + as.requests.Add(1) +} + +func (as *archiveStats) GetRequests() uint32 { + return as.requests.Load() +} + +func (as *archiveStats) GetDownloads() uint32 { + return as.fileDownloads.Load() +} + +func (as *archiveStats) GetUploads() uint32 { + return as.fileUploads.Load() +} + +func (as *archiveStats) GetBackendName() string { + return as.backendName +} + type ArchiveBackend interface { Exists(path string) (bool, error) Size(path string) (int64, error) @@ -87,6 +133,7 @@ type ArchiveInterface interface { GetXdrStreamForHash(hash Hash) (*XdrStream, error) GetXdrStream(pth string) (*XdrStream, error) GetCheckpointManager() CheckpointManager + GetStats() []ArchiveStats } var _ ArchiveInterface = &Archive{} @@ -115,6 +162,11 @@ type Archive struct { checkpointManager CheckpointManager backend ArchiveBackend + stats archiveStats +} + +func (arch *Archive) GetStats() []ArchiveStats { + return []ArchiveStats{&arch.stats} } func (arch *Archive) GetCheckpointManager() CheckpointManager { @@ -124,6 +176,7 @@ func (arch *Archive) GetCheckpointManager() CheckpointManager { func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) { var has HistoryArchiveState rdr, err := a.backend.GetFile(path) + a.stats.incrementDownloads() if err != nil { return has, err } @@ -150,6 +203,7 @@ func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) { func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error { exists, err := a.backend.Exists(path) + a.stats.incrementRequests() if err != nil { return err } @@ -161,19 +215,23 @@ func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *Command if err != nil { return err } + a.stats.incrementUploads() return a.backend.PutFile(path, ioutil.NopCloser(bytes.NewReader(buf))) } func (a *Archive) BucketExists(bucket Hash) (bool, error) { + a.stats.incrementRequests() return a.backend.Exists(BucketPath(bucket)) } func (a *Archive) BucketSize(bucket Hash) (int64, error) { + a.stats.incrementRequests() return a.backend.Size(BucketPath(bucket)) } func (a *Archive) CategoryCheckpointExists(cat string, chk uint32) (bool, error) { + a.stats.incrementRequests() return a.backend.Exists(CategoryCheckpointPath(cat, chk)) } @@ -306,14 +364,17 @@ func (a *Archive) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) erro } func (a *Archive) ListBucket(dp DirPrefix) (chan string, chan error) { + a.stats.incrementRequests() return a.backend.ListFiles(path.Join("bucket", dp.Path())) } func (a *Archive) ListAllBuckets() (chan string, chan error) { + a.stats.incrementRequests() return a.backend.ListFiles("bucket") } func (a *Archive) ListAllBucketHashes() (chan Hash, chan error) { + a.stats.incrementRequests() sch, errs := a.backend.ListFiles("bucket") ch := make(chan Hash) rx := regexp.MustCompile("bucket" + hexPrefixPat + "bucket-([0-9a-f]{64})\\.xdr\\.gz$") @@ -335,6 +396,7 @@ func (a *Archive) ListCategoryCheckpoints(cat string, pth string) (chan uint32, rx := regexp.MustCompile(cat + hexPrefixPat + 
cat + "-([0-9a-f]{8})\\." + regexp.QuoteMeta(ext) + "$") sch, errs := a.backend.ListFiles(path.Join(cat, pth)) + a.stats.incrementRequests() ch := make(chan uint32) errs = makeErrorPump(errs) @@ -372,6 +434,7 @@ func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) { return nil, errors.New("File has non-.xdr.gz suffix: " + pth) } rdr, err := a.backend.GetFile(pth) + a.stats.incrementDownloads() if err != nil { return nil, err } @@ -426,6 +489,9 @@ func Connect(u string, opts ConnectOptions) (*Archive, error) { } else { err = errors.New("unknown URL scheme: '" + parsed.Scheme + "'") } + + arch.stats = archiveStats{backendName: parsed.String()} + return &arch, err } diff --git a/historyarchive/archive_pool.go b/historyarchive/archive_pool.go index 590988e483..022259d7f0 100644 --- a/historyarchive/archive_pool.go +++ b/historyarchive/archive_pool.go @@ -33,11 +33,7 @@ func NewArchivePool(archiveURLs []string, config ConnectOptions) (ArchivePool, e for _, url := range archiveURLs { archive, err := Connect( url, - ConnectOptions{ - NetworkPassphrase: config.NetworkPassphrase, - CheckpointFrequency: config.CheckpointFrequency, - Context: config.Context, - }, + config, ) if err != nil { @@ -55,8 +51,18 @@ func NewArchivePool(archiveURLs []string, config ConnectOptions) (ArchivePool, e return validArchives, nil } +func (pa ArchivePool) GetStats() []ArchiveStats { + stats := []ArchiveStats{} + for _, archive := range pa { + if len(archive.GetStats()) == 1 { + stats = append(stats, archive.GetStats()[0]) + } + } + return stats +} + // Ensure the pool conforms to the ArchiveInterface -var _ ArchiveInterface = ArchivePool{} +var _ ArchiveInterface = &ArchivePool{} // Below are the ArchiveInterface method implementations. diff --git a/historyarchive/archive_test.go b/historyarchive/archive_test.go index 9f5f4fc0c9..78eed45378 100644 --- a/historyarchive/archive_test.go +++ b/historyarchive/archive_test.go @@ -13,6 +13,8 @@ import ( "io" "io/ioutil" "math/big" + "net/http" + "net/http/httptest" "os" "strings" "testing" @@ -176,6 +178,25 @@ func TestScan(t *testing.T) { GetRandomPopulatedArchive().Scan(opts) } +func TestConfiguresHttpUserAgent(t *testing.T) { + var userAgent string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userAgent = r.Header["User-Agent"][0] + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + archive, err := Connect(server.URL, ConnectOptions{ + UserAgent: "uatest", + }) + assert.NoError(t, err) + + ok, err := archive.BucketExists(EmptyXdrArrayHash()) + assert.True(t, ok) + assert.NoError(t, err) + assert.Equal(t, userAgent, "uatest") +} + func TestScanSize(t *testing.T) { defer cleanup() opts := testOptions() @@ -523,6 +544,8 @@ func assertXdrEquals(t *testing.T, a, b xdrEntry) { func TestGetLedgers(t *testing.T) { archive := GetTestMockArchive() _, err := archive.GetLedgers(1000, 1002) + assert.Equal(t, uint32(1), archive.GetStats()[0].GetRequests()) + assert.Equal(t, uint32(0), archive.GetStats()[0].GetDownloads()) assert.EqualError(t, err, "checkpoint 1023 is not published") ledgerHeaders := []xdr.LedgerHeaderHistoryEntry{ @@ -610,6 +633,8 @@ func TestGetLedgers(t *testing.T) { ledgers, err := archive.GetLedgers(1000, 1002) assert.NoError(t, err) assert.Len(t, ledgers, 3) + assert.Equal(t, uint32(7), archive.GetStats()[0].GetRequests()) // it started at 1, incurred 6 requests total, 3 queries, 3 downloads + assert.Equal(t, uint32(3), archive.GetStats()[0].GetDownloads()) // started 0, incurred 3 file 
downloads for i, seq := range []uint32{1000, 1001, 1002} { ledger := ledgers[seq] assertXdrEquals(t, ledgerHeaders[i], ledger.Header) diff --git a/historyarchive/mocks.go b/historyarchive/mocks.go index 3952211cd3..b256d0d7b9 100644 --- a/historyarchive/mocks.go +++ b/historyarchive/mocks.go @@ -103,3 +103,32 @@ func (m *MockArchive) GetXdrStream(pth string) (*XdrStream, error) { a := m.Called(pth) return a.Get(0).(*XdrStream), a.Error(1) } + +func (m *MockArchive) GetStats() []ArchiveStats { + a := m.Called() + return a.Get(0).([]ArchiveStats) +} + +type MockArchiveStats struct { + mock.Mock +} + +func (m *MockArchiveStats) GetRequests() uint32 { + a := m.Called() + return a.Get(0).(uint32) +} + +func (m *MockArchiveStats) GetDownloads() uint32 { + a := m.Called() + return a.Get(0).(uint32) +} + +func (m *MockArchiveStats) GetUploads() uint32 { + a := m.Called() + return a.Get(0).(uint32) +} + +func (m *MockArchiveStats) GetBackendName() string { + a := m.Called() + return a.Get(0).(string) +} diff --git a/ingest/ledgerbackend/captive_core_backend.go b/ingest/ledgerbackend/captive_core_backend.go index 285247bffa..b7b96938f2 100644 --- a/ingest/ledgerbackend/captive_core_backend.go +++ b/ingest/ledgerbackend/captive_core_backend.go @@ -178,6 +178,7 @@ func NewCaptive(config CaptiveCoreConfig) (*CaptiveStellarCore, error) { NetworkPassphrase: config.NetworkPassphrase, CheckpointFrequency: config.CheckpointFrequency, Context: config.Context, + UserAgent: config.UserAgent, }, ) diff --git a/ingest/ledgerbackend/captive_core_backend_test.go b/ingest/ledgerbackend/captive_core_backend_test.go index fb2ea4eff4..5178fd97a1 100644 --- a/ingest/ledgerbackend/captive_core_backend_test.go +++ b/ingest/ledgerbackend/captive_core_backend_test.go @@ -4,6 +4,8 @@ import ( "context" "encoding/hex" "fmt" + "net/http" + "net/http/httptest" "os" "sync" "testing" @@ -138,9 +140,16 @@ func TestCaptiveNew(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(storagePath) + var userAgent string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userAgent = r.Header["User-Agent"][0] + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + executablePath := "/etc/stellar-core" networkPassphrase := network.PublicNetworkPassphrase - historyURLs := []string{"http://history.stellar.org/prd/core-live/core_live_001"} + historyURLs := []string{server.URL} captiveStellarCore, err := NewCaptive( CaptiveCoreConfig{ @@ -148,12 +157,16 @@ func TestCaptiveNew(t *testing.T) { NetworkPassphrase: networkPassphrase, HistoryArchiveURLs: historyURLs, StoragePath: storagePath, + UserAgent: "uatest", }, ) assert.NoError(t, err) assert.Equal(t, uint32(0), captiveStellarCore.nextLedger) assert.NotNil(t, captiveStellarCore.archive) + _, err = captiveStellarCore.archive.BucketExists(historyarchive.EmptyXdrArrayHash()) + assert.NoError(t, err) + assert.Equal(t, "uatest", userAgent) } func TestCaptivePrepareRange(t *testing.T) { diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index 962fca7128..a5d5c52dde 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -5,8 +5,10 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). 
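The user-agent and stats plumbing recorded in the changelog entries below can be exercised directly against an archive; a minimal sketch using only calls covered by this patch's tests (the URL is a placeholder):

    package main

    import (
        "fmt"

        "github.com/stellar/go/historyarchive"
    )

    func main() {
        archive, err := historyarchive.Connect("http://localhost:1170", historyarchive.ConnectOptions{
            UserAgent: "horizon-example", // sent with every archive request
        })
        if err != nil {
            panic(err)
        }
        // Any archive call both carries the user agent and bumps the request stats.
        exists, _ := archive.BucketExists(historyarchive.EmptyXdrArrayHash())
        fmt.Println(exists, archive.GetStats()[0].GetRequests()) // stats now report 1 request
    }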
## Unreleased -### Added +### Fixed +- http archive requests include user agent and metrics ([5166](https://github.com/stellar/go/pull/5166)) +### Added - Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051)) ### Breaking Changes diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go index 3cc6d31c7d..d3831fca5c 100644 --- a/services/horizon/internal/ingest/fsm.go +++ b/services/horizon/internal/ingest/fsm.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/historyarchive" "github.com/stellar/go/ingest" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/support/errors" @@ -523,6 +524,13 @@ func (r resumeState) run(s *system) (transition, error) { r.addLedgerStatsMetricFromMap(s, "trades", tradeStatsMap) r.addProcessorDurationsMetricFromMap(s, stats.transactionDurations) + // Since a single system instance is shared throughout all states, this + // also sweeps up history archive counter increments made elsewhere, such + // as in verifyState invocations, which receive the same system instance; + // those extra archive usages simply roll up and are reported here as + // part of the resumeState transition. + addHistoryArchiveStatsMetrics(s, s.historyAdapter.GetStats()) + localLog := log.WithFields(logpkg.F{ "sequence": ingestLedger, "duration": duration, @@ -565,6 +573,26 @@ func (r resumeState) addProcessorDurationsMetricFromMap(s *system, m map[string] } } +func addHistoryArchiveStatsMetrics(s *system, stats []historyarchive.ArchiveStats) { + for _, historyServerStat := range stats { + s.Metrics().HistoryArchiveStatsCounter. + With(prometheus.Labels{ + "source": historyServerStat.GetBackendName(), + "type": "file_downloads"}). + Add(float64(historyServerStat.GetDownloads())) + s.Metrics().HistoryArchiveStatsCounter. + With(prometheus.Labels{ + "source": historyServerStat.GetBackendName(), + "type": "file_uploads"}). + Add(float64(historyServerStat.GetUploads())) + s.Metrics().HistoryArchiveStatsCounter. + With(prometheus.Labels{ + "source": historyServerStat.GetBackendName(), + "type": "requests"}).
+ Add(float64(historyServerStat.GetRequests())) + } +} + type waitForCheckpointState struct{} func (waitForCheckpointState) String() string { diff --git a/services/horizon/internal/ingest/history_archive_adapter.go b/services/horizon/internal/ingest/history_archive_adapter.go index d4cde9436f..7e415787e3 100644 --- a/services/horizon/internal/ingest/history_archive_adapter.go +++ b/services/horizon/internal/ingest/history_archive_adapter.go @@ -18,6 +18,7 @@ type historyArchiveAdapterInterface interface { GetLatestLedgerSequence() (uint32, error) BucketListHash(sequence uint32) (xdr.Hash, error) GetState(ctx context.Context, sequence uint32) (ingest.ChangeReader, error) + GetStats() []historyarchive.ArchiveStats } // newHistoryArchiveAdapter is a constructor to make a historyArchiveAdapter @@ -71,3 +72,7 @@ func (haa *historyArchiveAdapter) GetState(ctx context.Context, sequence uint32) return sr, nil } + +func (haa *historyArchiveAdapter) GetStats() []historyarchive.ArchiveStats { + return haa.archive.GetStats() +} diff --git a/services/horizon/internal/ingest/history_archive_adapter_test.go b/services/horizon/internal/ingest/history_archive_adapter_test.go index 7c9207cbe4..20d84149fa 100644 --- a/services/horizon/internal/ingest/history_archive_adapter_test.go +++ b/services/horizon/internal/ingest/history_archive_adapter_test.go @@ -33,6 +33,11 @@ func (m *mockHistoryArchiveAdapter) GetState(ctx context.Context, sequence uint3 return args.Get(0).(ingest.ChangeReader), args.Error(1) } +func (m *mockHistoryArchiveAdapter) GetStats() []historyarchive.ArchiveStats { + a := m.Called() + return a.Get(0).([]historyarchive.ArchiveStats) +} + func TestGetState_Read(t *testing.T) { archive, e := getTestArchive() if !assert.NoError(t, e) { diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 13f7017cbf..508476a55b 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -161,6 +161,9 @@ type Metrics struct { // ProcessorsRunDurationSummary exposes processors run durations. 
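// (The HistoryArchiveStatsCounter added below is partitioned by archive
// source and request type; a Prometheus scrape exposes series named, with
// illustrative label values,
// horizon_ingest_history_archive_stats_total{source="https://history.example.org",type="requests"}.)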
ProcessorsRunDurationSummary *prometheus.SummaryVec + + // HistoryArchiveStatsCounter counts requests, file downloads and file uploads made to history archives + HistoryArchiveStatsCounter *prometheus.CounterVec } type System interface { @@ -390,6 +393,14 @@ func (s *system) initMetrics() { }, []string{"name"}, ) + + s.metrics.HistoryArchiveStatsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "history_archive_stats_total", + Help: "counters of different history archive stats", + }, + []string{"source", "type"}, + ) } func (s *system) GetCurrentState() State { @@ -415,6 +426,7 @@ func (s *system) RegisterMetrics(registry *prometheus.Registry) { registry.MustRegister(s.metrics.ProcessorsRunDuration) registry.MustRegister(s.metrics.ProcessorsRunDurationSummary) registry.MustRegister(s.metrics.StateVerifyLedgerEntriesCount) + registry.MustRegister(s.metrics.HistoryArchiveStatsCounter) s.ledgerBackend = ledgerbackend.WithMetrics(s.ledgerBackend, registry, "horizon") } diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go index 013f176ae8..d989e7a9e5 100644 --- a/services/horizon/internal/ingest/resume_state_test.go +++ b/services/horizon/internal/ingest/resume_state_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "github.com/stellar/go/historyarchive" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/support/errors" "github.com/stellar/go/xdr" @@ -260,6 +261,12 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() { s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil) + mockStats := &historyarchive.MockArchiveStats{} + mockStats.On("GetBackendName").Return("name") + mockStats.On("GetDownloads").Return(uint32(0)) + mockStats.On("GetRequests").Return(uint32(0)) + mockStats.On("GetUploads").Return(uint32(0)) + s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once() s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).
Run(func(args mock.Arguments) { @@ -370,6 +377,12 @@ func (s *ResumeTestTestSuite) TestReapingObjectsDisabled() { s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once() + mockStats := &historyarchive.MockArchiveStats{} + mockStats.On("GetBackendName").Return("name") + mockStats.On("GetDownloads").Return(uint32(0)) + mockStats.On("GetRequests").Return(uint32(0)) + mockStats.On("GetUploads").Return(uint32(0)) + s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once() // Reap lookup tables not executed next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) @@ -413,6 +426,12 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() { s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() s.historyQ.On("ReapLookupTables", mock.AnythingOfType("*context.timerCtx"), mock.Anything).Return(nil, nil, errors.New("error reaping objects")).Once() s.historyQ.On("Rollback").Return(nil).Once() + mockStats := &historyarchive.MockArchiveStats{} + mockStats.On("GetBackendName").Return("name") + mockStats.On("GetDownloads").Return(uint32(0)) + mockStats.On("GetRequests").Return(uint32(0)) + mockStats.On("GetUploads").Return(uint32(0)) + s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once() next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) s.Assert().NoError(err) From 33bf9b6a970303358481745f80b919343652d08a Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 18 Jan 2024 13:23:38 -0800 Subject: [PATCH 07/21] Fix tradeagg rebuild from reingest command with parallel workers (#5168) --- services/horizon/CHANGELOG.md | 4 +- services/horizon/cmd/db.go | 2 +- services/horizon/internal/ingest/fsm.go | 4 +- .../fsm_reingest_history_range_state.go | 5 --- .../ingest/ingest_history_range_state_test.go | 40 +++++++++---------- services/horizon/internal/ingest/main.go | 15 ++++++- services/horizon/internal/ingest/main_test.go | 9 ++++- services/horizon/internal/ingest/parallel.go | 36 +++++++++++++++-- .../horizon/internal/ingest/parallel_test.go | 21 ++++++---- 9 files changed, 91 insertions(+), 45 deletions(-) diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index a5d5c52dde..70cb6e833c 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -6,9 +6,11 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). 
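Patch 07 below threads a new boolean through `ReingestRange` so the trade-aggregation rebuild runs once per command invocation instead of once per range state. A sketch of the updated call shape, assuming the in-repo import paths and an already-constructed system; the bounds and the comments' parameter names are guesses from context:

    import (
        "github.com/stellar/go/services/horizon/internal/db2/history"
        "github.com/stellar/go/services/horizon/internal/ingest"
    )

    // reingest re-ingests the given bounds; the final argument asks the system
    // to rebuild trade aggregation buckets after the whole range completes.
    func reingest(system ingest.System) error {
        ranges := []history.LedgerRange{{100, 200}} // assumed bounds
        return system.ReingestRange(ranges, false, true)
    }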
## Unreleased
 
 ### Fixed
-- http archive requests include user agent and metrics ([5166](https://github.com/stellar/go/pull/5166))
+- Trade agg rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168))
+- http archive requests include user agent ([5166](https://github.com/stellar/go/pull/5166))
 
 ### Added
+- http archive requests include metrics ([5166](https://github.com/stellar/go/pull/5166))
 - Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
 
 ### Breaking Changes
diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go
index a0d0e6c518..725df622b0 100644
--- a/services/horizon/cmd/db.go
+++ b/services/horizon/cmd/db.go
@@ -443,7 +443,7 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool,
 	}
 	defer system.Shutdown()
 
-	err = system.ReingestRange(ledgerRanges, reingestForce)
+	err = system.ReingestRange(ledgerRanges, reingestForce, true)
 	if err != nil {
 		if _, ok := errors.Cause(err).(ingest.ErrReingestRangeConflict); ok {
 			return fmt.Errorf(`The range you have provided overlaps with Horizon's most recently ingested ledger.
diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go
index d3831fca5c..59a1a7c969 100644
--- a/services/horizon/internal/ingest/fsm.go
+++ b/services/horizon/internal/ingest/fsm.go
@@ -499,7 +499,7 @@ func (r resumeState) run(s *system) (transition, error) {
 	}
 
 	rebuildStart := time.Now()
-	err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, ingestLedger, ingestLedger, s.config.RoundingSlippageFilter)
+	err = s.RebuildTradeAggregationBuckets(ingestLedger, ingestLedger)
 	if err != nil {
 		return retryResume(r), errors.Wrap(err, "error rebuilding trade aggregations")
 	}
@@ -741,7 +741,7 @@ func (v verifyRangeState) run(s *system) (transition, error) {
 			Info("Processed ledger")
 	}
 
-	err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, v.fromLedger, v.toLedger, s.config.RoundingSlippageFilter)
+	err = s.RebuildTradeAggregationBuckets(v.fromLedger, v.toLedger)
 	if err != nil {
 		return stop(), errors.Wrap(err, "error rebuilding trade aggregations")
 	}
diff --git a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
index e2e7724d68..832898d021 100644
--- a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
+++ b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
@@ -183,11 +183,6 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) {
 		}
 	}
 
-	err := s.historyQ.RebuildTradeAggregationBuckets(s.ctx, h.fromLedger, h.toLedger, s.config.RoundingSlippageFilter)
-	if err != nil {
-		return stop(), errors.Wrap(err, "Error rebuilding trade aggregations")
-	}
-
 	log.WithFields(logpkg.F{
 		"from": h.fromLedger,
 		"to":   h.toLedger,
diff --git a/services/horizon/internal/ingest/ingest_history_range_state_test.go b/services/horizon/internal/ingest/ingest_history_range_state_test.go
index 4598008eb8..4f7d2c4944 100644
--- a/services/horizon/internal/ingest/ingest_history_range_state_test.go
+++ b/services/horizon/internal/ingest/ingest_history_range_state_test.go
@@ -304,16 +304,16 @@ func (s *ReingestHistoryRangeStateTestSuite) TearDownTest() {
 
 func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateInvalidRange() {
 	// Recreate mock in this single test to remove Rollback assertion.
s.historyQ = &mockDBQ{} - err := s.system.ReingestRange([]history.LedgerRange{{0, 0}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{0, 0}}, false, true) s.Assert().EqualError(err, "Invalid range: {0 0} genesis ledger starts at 1") - err = s.system.ReingestRange([]history.LedgerRange{{0, 100}}, false) + err = s.system.ReingestRange([]history.LedgerRange{{0, 100}}, false, true) s.Assert().EqualError(err, "Invalid range: {0 100} genesis ledger starts at 1") - err = s.system.ReingestRange([]history.LedgerRange{{100, 0}}, false) + err = s.system.ReingestRange([]history.LedgerRange{{100, 0}}, false, true) s.Assert().EqualError(err, "Invalid range: {100 0} from > to") - err = s.system.ReingestRange([]history.LedgerRange{{100, 99}}, false) + err = s.system.ReingestRange([]history.LedgerRange{{100, 99}}, false, true) s.Assert().EqualError(err, "Invalid range: {100 99} from > to") } @@ -323,7 +323,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateInvali s.historyQ.On("Rollback").Return(nil).Once() s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() s.system.maxLedgerPerFlush = 0 - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "invalid maxLedgerPerFlush, must be greater than 0") } @@ -332,28 +332,28 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateBeginR s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() s.historyQ.On("Begin", s.ctx).Return(errors.New("my error")).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "Error starting a transaction: my error") } func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateGetLastLedgerIngestNonBlockingError() { s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), errors.New("my error")).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "Error getting last ingested ledger: my error") } func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateRangeOverlaps() { s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(190), nil).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().Equal(ErrReingestRangeConflict{190}, err) } func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStatRangeOverlapsAtEnd() { s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(200), nil).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().Equal(ErrReingestRangeConflict{200}, err) } @@ -369,7 +369,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateClearH "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), ).Return(errors.New("my error")).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "error in DeleteRangeAll: my error") } @@ -397,7 +397,7 @@ func (s 
*ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateRunTra s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(errors.New("my error")).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "error processing ledger range 100 - 100: my error") } @@ -428,7 +428,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateCommit s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(nil).Once() } - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().EqualError(err, "Error committing db transaction: my error") } @@ -460,7 +460,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces } // system.maxLedgerPerFlush has been set by default to 1 in test suite setup - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().NoError(err) } @@ -500,7 +500,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces s.runner.On("RunTransactionProcessorsOnLedgers", firstLedgersBatch).Return(nil).Once() s.runner.On("RunTransactionProcessorsOnLedgers", secondLedgersBatch).Return(nil).Once() s.system.maxLedgerPerFlush = 60 - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true) s.Assert().NoError(err) } @@ -534,7 +534,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(nil).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 100}}, false) + err := s.system.ReingestRange([]history.LedgerRange{{100, 100}}, false, true) s.Assert().NoError(err) } @@ -543,7 +543,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceG s.historyQ.On("Rollback").Return(nil).Once() s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true) s.Assert().EqualError(err, "Error getting last ingested ledger: my error") } @@ -576,7 +576,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForce( } // system.maxLedgerPerFlush has been set by default to 1 in test suite setup - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true) s.Assert().NoError(err) } @@ -610,7 +610,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceL s.ledgerBackend.On("GetLedger", s.ctx, uint32(106)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once() // system.maxLedgerPerFlush has been set by default to 1 in test suite setup - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true) s.Assert().EqualError(err, "error getting ledger: my 
error") } @@ -644,7 +644,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceL s.ledgerBackend.On("GetLedger", s.ctx, uint32(106)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once() // system.maxLedgerPerFlush has been set by default to 1 in test suite setup - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true) s.Assert().EqualError(err, "Error committing db transaction: error getting ledger: my error") } @@ -686,6 +686,6 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceW s.runner.On("RunTransactionProcessorsOnLedgers", secondLedgersBatch).Return(nil).Once() s.system.maxLedgerPerFlush = 60 - err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true) s.Assert().NoError(err) } diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 508476a55b..f064c4695c 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -173,10 +173,11 @@ type System interface { StressTest(numTransactions, changesPerTransaction int) error VerifyRange(fromLedger, toLedger uint32, verifyState bool) error BuildState(sequence uint32, skipChecks bool) error - ReingestRange(ledgerRanges []history.LedgerRange, force bool) error + ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error BuildGenesisState() error Shutdown() GetCurrentState() State + RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error } type system struct { @@ -521,7 +522,7 @@ func validateRanges(ledgerRanges []history.LedgerRange) error { // ReingestRange runs the ingestion pipeline on the range of ledgers ingesting // history data only. -func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool) error { +func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error { if err := validateRanges(ledgerRanges); err != nil { return err } @@ -542,10 +543,20 @@ func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool) e if err != nil { return err } + if rebuildTradeAgg { + err = s.RebuildTradeAggregationBuckets(cur.StartSequence, cur.EndSequence) + if err != nil { + return errors.Wrap(err, "Error rebuilding trade aggregations") + } + } } return nil } +func (s *system) RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error { + return s.historyQ.RebuildTradeAggregationBuckets(s.ctx, fromLedger, toLedger, s.config.RoundingSlippageFilter) +} + // BuildGenesisState runs the ingestion pipeline on genesis ledger. Transitions // to stopState when done. 
func (s *system) BuildGenesisState() error { diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go index 460c27e062..80b5a40ed1 100644 --- a/services/horizon/internal/ingest/main_test.go +++ b/services/horizon/internal/ingest/main_test.go @@ -592,8 +592,8 @@ func (m *mockSystem) BuildState(sequence uint32, skipChecks bool) error { return args.Error(0) } -func (m *mockSystem) ReingestRange(ledgerRanges []history.LedgerRange, force bool) error { - args := m.Called(ledgerRanges, force) +func (m *mockSystem) ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error { + args := m.Called(ledgerRanges, force, rebuildTradeAgg) return args.Error(0) } @@ -607,6 +607,11 @@ func (m *mockSystem) GetCurrentState() State { return args.Get(0).(State) } +func (m *mockSystem) RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error { + args := m.Called(fromLedger, toLedger) + return args.Error(0) +} + func (m *mockSystem) Shutdown() { m.Called() } diff --git a/services/horizon/internal/ingest/parallel.go b/services/horizon/internal/ingest/parallel.go index 525f153b81..4f07c21cc4 100644 --- a/services/horizon/internal/ingest/parallel.go +++ b/services/horizon/internal/ingest/parallel.go @@ -2,6 +2,7 @@ package ingest import ( "fmt" + "math" "sync" "github.com/stellar/go/services/horizon/internal/db2/history" @@ -61,7 +62,7 @@ func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, rei case <-stop: return rangeError{} case reingestRange := <-reingestJobQueue: - err := s.ReingestRange([]history.LedgerRange{reingestRange}, false) + err := s.ReingestRange([]history.LedgerRange{reingestRange}, false, false) if err != nil { return rangeError{ err: err, @@ -73,7 +74,24 @@ func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, rei } } -func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, stop <-chan struct{}, reingestJobQueue chan<- history.LedgerRange) { +func (ps *ParallelSystems) rebuildTradeAggRanges(ledgerRanges []history.LedgerRange) error { + s, err := ps.systemFactory(ps.config) + if err != nil { + return err + } + + for _, cur := range ledgerRanges { + err := s.RebuildTradeAggregationBuckets(cur.StartSequence, cur.EndSequence) + if err != nil { + return errors.Wrapf(err, "Error rebuilding trade aggregations for range start=%v, stop=%v", cur.StartSequence, cur.EndSequence) + } + } + return nil +} + +// returns the lowest ledger to start from of all ledgerRanges +func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, stop <-chan struct{}, reingestJobQueue chan<- history.LedgerRange) uint32 { + lowestLedger := uint32(math.MaxUint32) for _, cur := range ledgerRanges { for subRangeFrom := cur.StartSequence; subRangeFrom < cur.EndSequence; { // job queuing @@ -83,12 +101,16 @@ func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, } select { case <-stop: - return + return lowestLedger case reingestJobQueue <- history.LedgerRange{StartSequence: subRangeFrom, EndSequence: subRangeTo}: } + if subRangeFrom < lowestLedger { + lowestLedger = subRangeFrom + } subRangeFrom = subRangeTo + 1 } } + return lowestLedger } func calculateParallelLedgerBatchSize(rangeSize uint32, batchSizeSuggestion uint32, workerCount uint) uint32 { @@ -166,7 +188,7 @@ func (ps *ParallelSystems) ReingestRange(ledgerRanges []history.LedgerRange, bat }() } - enqueueReingestTasks(ledgerRanges, batchSize, stop, reingestJobQueue) + 
lowestLedger := enqueueReingestTasks(ledgerRanges, batchSize, stop, reingestJobQueue) stopOnce.Do(func() { close(stop) @@ -176,7 +198,13 @@ func (ps *ParallelSystems) ReingestRange(ledgerRanges []history.LedgerRange, bat if lowestRangeErr != nil { lastLedger := ledgerRanges[len(ledgerRanges)-1].EndSequence + if err := ps.rebuildTradeAggRanges([]history.LedgerRange{{StartSequence: lowestLedger, EndSequence: lowestRangeErr.ledgerRange.StartSequence}}); err != nil { + log.WithError(err).Errorf("error when trying to rebuild trade agg for partially completed portion of overall parallel reingestion range, start=%v, stop=%v", lowestLedger, lowestRangeErr.ledgerRange.StartSequence) + } return errors.Wrapf(lowestRangeErr, "job failed, recommended restart range: [%d, %d]", lowestRangeErr.ledgerRange.StartSequence, lastLedger) } + if err := ps.rebuildTradeAggRanges(ledgerRanges); err != nil { + return err + } return nil } diff --git a/services/horizon/internal/ingest/parallel_test.go b/services/horizon/internal/ingest/parallel_test.go index 27ab0c459f..8004a4048c 100644 --- a/services/horizon/internal/ingest/parallel_test.go +++ b/services/horizon/internal/ingest/parallel_test.go @@ -31,7 +31,7 @@ func TestParallelReingestRange(t *testing.T) { m sync.Mutex ) result := &mockSystem{} - result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Run( + result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Run( func(args mock.Arguments) { m.Lock() defer m.Unlock() @@ -39,6 +39,7 @@ func TestParallelReingestRange(t *testing.T) { // simulate call time.Sleep(time.Millisecond * time.Duration(10+rand.Int31n(50))) }).Return(error(nil)) + result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(2050)).Return(nil).Once() factory := func(c Config) (System, error) { return result, nil } @@ -59,6 +60,7 @@ func TestParallelReingestRange(t *testing.T) { rangesCalled = nil system, err = newParallelSystems(config, 1, factory) assert.NoError(t, err) + result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1024)).Return(nil).Once() err = system.ReingestRange([]history.LedgerRange{{1, 1024}}, 64) result.AssertExpectations(t) expected = []history.LedgerRange{ @@ -75,8 +77,10 @@ func TestParallelReingestRangeError(t *testing.T) { config := Config{} result := &mockSystem{} // Fail on the second range - result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Return(errors.New("failed because of foo")) - result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil)) + result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, false, false).Return(errors.New("failed because of foo")).Once() + result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Return(nil) + result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1537)).Return(nil).Once() + factory := func(c Config) (System, error) { return result, nil } @@ -94,17 +98,18 @@ func TestParallelReingestRangeErrorInEarlierJob(t *testing.T) { wg.Add(1) result := &mockSystem{} // Fail on an lower subrange after the first error - result.On("ReingestRange", []history.LedgerRange{{1025, 1280}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) { + result.On("ReingestRange", []history.LedgerRange{{1025, 1280}}, false, false).Run(func(mock.Arguments) { // Wait for a more recent range to error wg.Wait() // This sleep should help making sure the result of this 
range is processed later than the one below
 		// (there are no guarantees without instrumenting ReingestRange(), but that's too complicated)
 		time.Sleep(50 * time.Millisecond)
-	}).Return(errors.New("failed because of foo"))
-	result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) {
+	}).Return(errors.New("failed because of foo")).Once()
+	result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, false, false).Run(func(mock.Arguments) {
 		wg.Done()
-	}).Return(errors.New("failed because of bar"))
-	result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil))
+	}).Return(errors.New("failed because of bar")).Once()
+	result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Return(error(nil))
+	result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1025)).Return(nil).Once()
 
 	factory := func(c Config) (System, error) {
 		return result, nil

From 477db6f584d2dba8fd4658ba8197d38be20afab6 Mon Sep 17 00:00:00 2001
From: Shawn Reuland
Date: Fri, 19 Jan 2024 12:00:24 -0800
Subject: [PATCH 08/21] 2.28.0 release prep, update ci tests for latest
 soroban and changelog notes

---
 .github/workflows/horizon.yml | 6 +++---
 services/horizon/CHANGELOG.md | 5 +++++
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/horizon.yml b/.github/workflows/horizon.yml
index 6b94521e0a..873fcd0243 100644
--- a/.github/workflows/horizon.yml
+++ b/.github/workflows/horizon.yml
@@ -33,9 +33,9 @@ jobs:
     env:
       HORIZON_INTEGRATION_TESTS_ENABLED: true
       HORIZON_INTEGRATION_TESTS_CORE_MAX_SUPPORTED_PROTOCOL: ${{ matrix.protocol-version }}
-      PROTOCOL_20_CORE_DEBIAN_PKG_VERSION: 20.0.0-1615.617729910.focal
-      PROTOCOL_20_CORE_DOCKER_IMG: stellar/unsafe-stellar-core:20.0.0-1615.617729910.focal
-      PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.0.0-tests-45
+      PROTOCOL_20_CORE_DEBIAN_PKG_VERSION: 20.1.0-1656.114b833e7.focal
+      PROTOCOL_20_CORE_DOCKER_IMG: stellar/unsafe-stellar-core:20.1.0-1656.114b833e7.focal
+      PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.2.0
       PROTOCOL_19_CORE_DEBIAN_PKG_VERSION: 19.14.0-1500.5664eff4e.focal
       PROTOCOL_19_CORE_DOCKER_IMG: stellar/stellar-core:19.14.0-1500.5664eff4e.focal
       PGHOST: localhost
diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index 70cb6e833c..2ba1ccee94 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -5,13 +5,18 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
 
 ## Unreleased
 
+## 2.28.0
+
 ### Fixed
 - Trade agg rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168))
 - http archive requests include user agent ([5166](https://github.com/stellar/go/pull/5166))
+- Network usage has been significantly reduced with caching. 
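The caching referenced in this changelog entry is the on-disk history archive bucket cache implemented in patch 09 below. As a minimal sketch, assuming an illustrative archive URL, cache path, and `MaxFiles` value, a library consumer could enable it through `historyarchive.ConnectOptions` like this:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/stellar/go/historyarchive"
)

func main() {
	// Illustrative values only; any public archive URL and writable path work.
	archive, err := historyarchive.Connect(
		"https://history.stellar.org/prd/core-live/core_live_001",
		historyarchive.ConnectOptions{
			NetworkPassphrase:   "Public Global Stellar Network ; September 2015",
			CheckpointFrequency: 64,
			CacheConfig: historyarchive.CacheOptions{
				Cache:    true,
				Path:     filepath.Join(os.TempDir(), "bucket-cache"),
				MaxFiles: 150,
			},
		},
	)
	if err != nil {
		log.Fatal(err)
	}

	// Repeated bucket reads are served from the LRU disk cache; per-archive
	// stats expose how often the upstream was actually contacted.
	for _, s := range archive.GetStats() {
		fmt.Println(s.GetBackendName(), s.GetRequests(), s.GetDownloads())
	}
}
```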
### Added +- improve ingestion performance timing ([4909](https://github.com/stellar/go/issues/4909)) - http archive requests include metrics ([5166](https://github.com/stellar/go/pull/5166)) - Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051)) +- limit global flags displayed on cli help output ([5077](https://github.com/stellar/go/pull/5077)) ### Breaking Changes - Removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update` , they were related to legacy non-captive core ingestion and are no longer usable. From 7e6d25fe1f1b911460f5cdfe3eda541a22ff5c9e Mon Sep 17 00:00:00 2001 From: George Date: Fri, 19 Jan 2024 12:17:45 -0800 Subject: [PATCH 09/21] historyarchive: Cache bucket files from history archives on disk. (#5171) * go mod tidy * Add double-close protection * Add request tracking when cache invokes upstream download * Add cache hit tracking * Move stat tracking to a separate file * Modify test to track stats+integrity after caching * Stop double-closing identical XDR stream readers --- go.mod | 2 +- historyarchive/archive.go | 106 +++++---- historyarchive/archive_cache.go | 202 ++++++++++++++++++ historyarchive/archive_pool.go | 6 +- historyarchive/archive_test.go | 34 ++- historyarchive/mocks.go | 5 + historyarchive/stats.go | 57 +++++ historyarchive/util.go | 3 +- historyarchive/xdrstream.go | 6 +- services/horizon/CHANGELOG.md | 14 +- services/horizon/internal/ingest/fsm.go | 5 + services/horizon/internal/ingest/main.go | 6 + .../internal/ingest/resume_state_test.go | 3 + 13 files changed, 371 insertions(+), 78 deletions(-) create mode 100644 historyarchive/archive_cache.go create mode 100644 historyarchive/stats.go diff --git a/go.mod b/go.mod index 952e274f91..a7fd2ef71c 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru v1.0.2 github.com/imkira/go-interpol v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect diff --git a/historyarchive/archive.go b/historyarchive/archive.go index 13750c1fb4..d53ab37071 100644 --- a/historyarchive/archive.go +++ b/historyarchive/archive.go @@ -10,14 +10,12 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/url" "path" "regexp" "strconv" "strings" "sync" - "sync/atomic" log "github.com/sirupsen/logrus" @@ -52,6 +50,8 @@ type ConnectOptions struct { CheckpointFrequency uint32 // UserAgent is the value of `User-Agent` header. Applicable only for HTTP client. UserAgent string + // CacheConfig controls how/if bucket files are cached on the disk. + CacheConfig CacheOptions } type Ledger struct { @@ -60,51 +60,6 @@ type Ledger struct { TransactionResult xdr.TransactionHistoryResultEntry } -// golang will auto wrap them back to 0 if they overflow after addition. 
-type archiveStats struct { - requests atomic.Uint32 - fileDownloads atomic.Uint32 - fileUploads atomic.Uint32 - backendName string -} - -type ArchiveStats interface { - GetRequests() uint32 - GetDownloads() uint32 - GetUploads() uint32 - GetBackendName() string -} - -func (as *archiveStats) incrementDownloads() { - as.fileDownloads.Add(1) - as.incrementRequests() -} - -func (as *archiveStats) incrementUploads() { - as.fileUploads.Add(1) - as.incrementRequests() -} - -func (as *archiveStats) incrementRequests() { - as.requests.Add(1) -} - -func (as *archiveStats) GetRequests() uint32 { - return as.requests.Load() -} - -func (as *archiveStats) GetDownloads() uint32 { - return as.fileDownloads.Load() -} - -func (as *archiveStats) GetUploads() uint32 { - return as.fileUploads.Load() -} - -func (as *archiveStats) GetBackendName() string { - return as.backendName -} - type ArchiveBackend interface { Exists(path string) (bool, error) Size(path string) (int64, error) @@ -162,6 +117,7 @@ type Archive struct { checkpointManager CheckpointManager backend ArchiveBackend + cache *ArchiveBucketCache stats archiveStats } @@ -216,13 +172,11 @@ func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *Command return err } a.stats.incrementUploads() - return a.backend.PutFile(path, - ioutil.NopCloser(bytes.NewReader(buf))) + return a.backend.PutFile(path, io.NopCloser(bytes.NewReader(buf))) } func (a *Archive) BucketExists(bucket Hash) (bool, error) { - a.stats.incrementRequests() - return a.backend.Exists(BucketPath(bucket)) + return a.cachedExists(BucketPath(bucket)) } func (a *Archive) BucketSize(bucket Hash) (int64, error) { @@ -395,8 +349,8 @@ func (a *Archive) ListCategoryCheckpoints(cat string, pth string) (chan uint32, ext := categoryExt(cat) rx := regexp.MustCompile(cat + hexPrefixPat + cat + "-([0-9a-f]{8})\\." + regexp.QuoteMeta(ext) + "$") - sch, errs := a.backend.ListFiles(path.Join(cat, pth)) a.stats.incrementRequests() + sch, errs := a.backend.ListFiles(path.Join(cat, pth)) ch := make(chan uint32) errs = makeErrorPump(errs) @@ -433,14 +387,42 @@ func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) { if !strings.HasSuffix(pth, ".xdr.gz") { return nil, errors.New("File has non-.xdr.gz suffix: " + pth) } - rdr, err := a.backend.GetFile(pth) - a.stats.incrementDownloads() + rdr, err := a.cachedGet(pth) if err != nil { return nil, err } return NewXdrGzStream(rdr) } +func (a *Archive) cachedGet(pth string) (io.ReadCloser, error) { + if a.cache != nil { + rdr, foundInCache, err := a.cache.GetFile(pth, a.backend) + if !foundInCache { + a.stats.incrementDownloads() + } else { + a.stats.incrementCacheHits() + } + if err == nil { + return rdr, nil + } + + // If there's an error, retry with the uncached backend. 
+ a.cache.Evict(pth) + } + + a.stats.incrementDownloads() + return a.backend.GetFile(pth) +} + +func (a *Archive) cachedExists(pth string) (bool, error) { + if a.cache != nil && a.cache.Exists(pth) { + return true, nil + } + + a.stats.incrementRequests() + return a.backend.Exists(pth) +} + func Connect(u string, opts ConnectOptions) (*Archive, error) { arch := Archive{ networkPassphrase: opts.NetworkPassphrase, @@ -490,9 +472,21 @@ func Connect(u string, opts ConnectOptions) (*Archive, error) { err = errors.New("unknown URL scheme: '" + parsed.Scheme + "'") } - arch.stats = archiveStats{backendName: parsed.String()} + if err != nil { + return &arch, err + } - return &arch, err + if opts.CacheConfig.Cache { + cache, innerErr := MakeArchiveBucketCache(opts.CacheConfig) + if innerErr != nil { + return &arch, innerErr + } + + arch.cache = cache + } + + arch.stats = archiveStats{backendName: parsed.String()} + return &arch, nil } func MustConnect(u string, opts ConnectOptions) *Archive { diff --git a/historyarchive/archive_cache.go b/historyarchive/archive_cache.go new file mode 100644 index 0000000000..a3029a7ae4 --- /dev/null +++ b/historyarchive/archive_cache.go @@ -0,0 +1,202 @@ +package historyarchive + +import ( + "io" + "os" + "path" + + lru "github.com/hashicorp/golang-lru" + log "github.com/sirupsen/logrus" +) + +type CacheOptions struct { + Cache bool + + Path string + MaxFiles uint +} + +type ArchiveBucketCache struct { + path string + lru *lru.Cache + log *log.Entry +} + +// MakeArchiveBucketCache creates a cache on the disk at the given path that +// acts as an LRU cache, mimicking a particular upstream. +func MakeArchiveBucketCache(opts CacheOptions) (*ArchiveBucketCache, error) { + log_ := log. + WithField("subservice", "fs-cache"). + WithField("path", opts.Path). + WithField("cap", opts.MaxFiles) + + if _, err := os.Stat(opts.Path); err == nil || os.IsExist(err) { + log_.Warnf("Cache directory already exists, removing") + os.RemoveAll(opts.Path) + } + + backend := &ArchiveBucketCache{ + path: opts.Path, + log: log_, + } + + cache, err := lru.NewWithEvict(int(opts.MaxFiles), backend.onEviction) + if err != nil { + return &ArchiveBucketCache{}, err + } + backend.lru = cache + + log_.Info("Bucket cache initialized") + return backend, nil +} + +// GetFile retrieves the file contents from the local cache if present. +// Otherwise, it returns the same result as the upstream, adding that result +// into the local cache if possible. It returns a 3-tuple of a reader (which may +// be nil on an error), an indication of whether or not it was *found* in the +// cache, and any error. +func (abc *ArchiveBucketCache) GetFile( + filepath string, + upstream ArchiveBackend, +) (io.ReadCloser, bool, error) { + L := abc.log.WithField("key", filepath) + localPath := path.Join(abc.path, filepath) + + // If the lockfile exists, we should defer to the remote source but *not* + // update the cache, as it means there's an in-progress sync of the same + // file. + _, statErr := os.Stat(NameLockfile(localPath)) + if statErr == nil || os.IsExist(statErr) { + L.Info("Incomplete file in on-disk cache: deferring") + reader, err := upstream.GetFile(filepath) + return reader, false, err + } else if _, ok := abc.lru.Get(localPath); !ok { + L.Info("File does not exist in the cache: downloading") + + // Since it's not on-disk, pull it from the remote backend, shove it + // into the cache, and write it to disk. 
+ remote, err := upstream.GetFile(filepath) + if err != nil { + return remote, false, err + } + + local, err := abc.createLocal(filepath) + if err != nil { + // If there's some local FS error, we can still continue with the + // remote version, so just log it and continue. + L.WithError(err).Warn("Creating cache file failed") + return remote, false, nil + } + + return teeReadCloser(remote, local, func() error { + L.Debug("Download complete: removing lockfile") + return os.Remove(NameLockfile(localPath)) + }), false, nil + } + + L.Info("Found file in cache") + // The cache claims it exists, so just give it a read and send it. + local, err := os.Open(localPath) + if err != nil { + // Uh-oh, the cache and the disk are not in sync somehow? Let's evict + // this value and try again (recurse) w/ the remote version. + L.WithError(err).Warn("Opening cached file failed") + abc.lru.Remove(localPath) + return abc.GetFile(filepath, upstream) + } + + return local, true, nil +} + +func (abc *ArchiveBucketCache) Exists(filepath string) bool { + return abc.lru.Contains(path.Join(abc.path, filepath)) +} + +// Close purges the cache and cleans up the filesystem. +func (abc *ArchiveBucketCache) Close() error { + abc.lru.Purge() + return os.RemoveAll(abc.path) +} + +// Evict removes a file from the cache and the filesystem. +func (abc *ArchiveBucketCache) Evict(filepath string) { + log.WithField("key", filepath).Info("Evicting file from the disk") + abc.lru.Remove(path.Join(abc.path, filepath)) +} + +func (abc *ArchiveBucketCache) onEviction(key, value interface{}) { + path := key.(string) + os.Remove(NameLockfile(path)) // just in case + if err := os.Remove(path); err != nil { // best effort removal + abc.log.WithError(err). + WithField("key", path). + Warn("Removal failed after cache eviction") + } +} + +func (abc *ArchiveBucketCache) createLocal(filepath string) (*os.File, error) { + localPath := path.Join(abc.path, filepath) + if err := os.MkdirAll(path.Dir(localPath), 0755 /* drwxr-xr-x */); err != nil { + return nil, err + } + + local, err := os.Create(localPath) /* mode -rw-rw-rw- */ + if err != nil { + return nil, err + } + _, err = os.Create(NameLockfile(localPath)) + if err != nil { + return nil, err + } + + abc.lru.Add(localPath, struct{}{}) // just use the cache as an array + return local, nil +} + +func NameLockfile(file string) string { + return file + ".lock" +} + +// The below is a helper interface so that we can use io.TeeReader to write +// data locally immediately as we read it remotely. 
+ +type trc struct { + io.Reader + close func() error + closed bool // prevents a double-close +} + +func (t trc) Close() error { + if t.closed { + return nil + } + + return t.close() +} + +func teeReadCloser(r io.ReadCloser, w io.WriteCloser, onClose func() error) io.ReadCloser { + closer := trc{ + Reader: io.TeeReader(r, w), + closed: false, + } + closer.close = func() error { + if closer.closed { + return nil + } + + // Always run all closers, but return the first error + err1 := r.Close() + err2 := w.Close() + err3 := onClose() + + closer.closed = true + if err1 != nil { + return err1 + } else if err2 != nil { + return err2 + } + return err3 + } + + return closer +} diff --git a/historyarchive/archive_pool.go b/historyarchive/archive_pool.go index 022259d7f0..984e6d1306 100644 --- a/historyarchive/archive_pool.go +++ b/historyarchive/archive_pool.go @@ -31,11 +31,7 @@ func NewArchivePool(archiveURLs []string, config ConnectOptions) (ArchivePool, e // Try connecting to all of the listed archives, but only store valid ones. var validArchives ArchivePool for _, url := range archiveURLs { - archive, err := Connect( - url, - config, - ) - + archive, err := Connect(url, config) if err != nil { lastErr = errors.Wrapf(err, "Error connecting to history archive (%s)", url) continue diff --git a/historyarchive/archive_test.go b/historyarchive/archive_test.go index 78eed45378..4518315f3e 100644 --- a/historyarchive/archive_test.go +++ b/historyarchive/archive_test.go @@ -16,6 +16,7 @@ import ( "net/http" "net/http/httptest" "os" + "path/filepath" "strings" "testing" @@ -41,7 +42,13 @@ func GetTestS3Archive() *Archive { } func GetTestMockArchive() *Archive { - return MustConnect("mock://test", ConnectOptions{CheckpointFrequency: 64}) + return MustConnect("mock://test", ConnectOptions{ + CheckpointFrequency: 64, + CacheConfig: CacheOptions{ + Cache: true, + Path: filepath.Join(os.TempDir(), "history-archive-test-cache"), + MaxFiles: 5, + }}) } var tmpdirs []string @@ -630,11 +637,32 @@ func TestGetLedgers(t *testing.T) { []xdrEntry{results[0], results[1], results[2]}, ) + stats := archive.GetStats()[0] ledgers, err := archive.GetLedgers(1000, 1002) + + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + // it started at 1, incurred 6 requests total, 3 queries, 3 downloads + assert.EqualValues(t, 7, stats.GetRequests()) + // started 0, incurred 3 file downloads + assert.EqualValues(t, 3, stats.GetDownloads()) + for i, seq := range []uint32{1000, 1001, 1002} { + ledger := ledgers[seq] + assertXdrEquals(t, ledgerHeaders[i], ledger.Header) + assertXdrEquals(t, transactions[i], ledger.Transaction) + assertXdrEquals(t, results[i], ledger.TransactionResult) + } + + // Repeat the same check but ensure the cache was used + ledgers, err = archive.GetLedgers(1000, 1002) // all cached assert.NoError(t, err) assert.Len(t, ledgers, 3) - assert.Equal(t, uint32(7), archive.GetStats()[0].GetRequests()) // it started at 1, incurred 6 requests total, 3 queries, 3 downloads - assert.Equal(t, uint32(3), archive.GetStats()[0].GetDownloads()) // started 0, incurred 3 file downloads + + // downloads should not change because of the cache + assert.EqualValues(t, 3, stats.GetDownloads()) + // but requests increase because of 3 fetches to categories + assert.EqualValues(t, 10, stats.GetRequests()) + assert.EqualValues(t, 3, stats.GetCacheHits()) for i, seq := range []uint32{1000, 1001, 1002} { ledger := ledgers[seq] assertXdrEquals(t, ledgerHeaders[i], ledger.Header) diff --git a/historyarchive/mocks.go 
b/historyarchive/mocks.go
index b256d0d7b9..fe497ec36e 100644
--- a/historyarchive/mocks.go
+++ b/historyarchive/mocks.go
@@ -132,3 +132,8 @@ func (m *MockArchiveStats) GetBackendName() string {
 	a := m.Called()
 	return a.Get(0).(string)
 }
+
+func (m *MockArchiveStats) GetCacheHits() uint32 {
+	a := m.Called()
+	return a.Get(0).(uint32)
+}
diff --git a/historyarchive/stats.go b/historyarchive/stats.go
new file mode 100644
index 0000000000..c182853d1b
--- /dev/null
+++ b/historyarchive/stats.go
@@ -0,0 +1,57 @@
+package historyarchive
+
+import "sync/atomic"
+
+// golang will auto wrap them back to 0 if they overflow after addition.
+type archiveStats struct {
+	requests      atomic.Uint32
+	fileDownloads atomic.Uint32
+	fileUploads   atomic.Uint32
+	cacheHits     atomic.Uint32
+	backendName   string
+}
+
+type ArchiveStats interface {
+	GetRequests() uint32
+	GetDownloads() uint32
+	GetUploads() uint32
+	GetCacheHits() uint32
+	GetBackendName() string
+}
+
+func (as *archiveStats) incrementDownloads() {
+	as.fileDownloads.Add(1)
+	as.incrementRequests()
+}
+
+func (as *archiveStats) incrementUploads() {
+	as.fileUploads.Add(1)
+	as.incrementRequests()
+}
+
+func (as *archiveStats) incrementRequests() {
+	as.requests.Add(1)
+}
+
+func (as *archiveStats) incrementCacheHits() {
+	as.cacheHits.Add(1)
+}
+
+func (as *archiveStats) GetRequests() uint32 {
+	return as.requests.Load()
+}
+
+func (as *archiveStats) GetDownloads() uint32 {
+	return as.fileDownloads.Load()
+}
+
+func (as *archiveStats) GetUploads() uint32 {
+	return as.fileUploads.Load()
+}
+
+func (as *archiveStats) GetBackendName() string {
+	return as.backendName
+}
+func (as *archiveStats) GetCacheHits() uint32 {
+	return as.cacheHits.Load()
+}
diff --git a/historyarchive/util.go b/historyarchive/util.go
index b2a7c96778..c0a59dda5a 100644
--- a/historyarchive/util.go
+++ b/historyarchive/util.go
@@ -7,10 +7,11 @@ package historyarchive
 import (
 	"bufio"
 	"fmt"
-	log "github.com/sirupsen/logrus"
 	"io"
 	"net/http"
 	"path"
+
+	log "github.com/sirupsen/logrus"
 )
 
 func makeTicker(onTick func(uint)) chan bool {
diff --git a/historyarchive/xdrstream.go b/historyarchive/xdrstream.go
index e0d9745585..de8efc3bb6 100644
--- a/historyarchive/xdrstream.go
+++ b/historyarchive/xdrstream.go
@@ -134,11 +134,7 @@ func (x *XdrStream) closeReaders() error {
 			err = err2
 		}
 	}
-	if x.rdr2 != nil {
-		if err2 := x.rdr2.Close(); err2 != nil {
-			err = err2
-		}
-	}
+
 	if x.gzipReader != nil {
 		if err2 := x.gzipReader.Close(); err2 != nil {
 			err = err2
diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index 2ba1ccee94..25273f964c 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -8,18 +8,18 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
 
 ## 2.28.0
 
 ### Fixed
-- Trade agg rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168))
-- http archive requests include user agent ([5166](https://github.com/stellar/go/pull/5166))
+- Ingestion performance timing is improved ([4909](https://github.com/stellar/go/issues/4909))
+- Trade aggregation rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168))
+- Limited global flags displayed on cli help output ([5077](https://github.com/stellar/go/pull/5077))
+- Network usage has been significantly reduced with caching. 
**Warning:** To support the cache, disk requirements may increase by up to 15GB ([5171](https://github.com/stellar/go/pull/5171)).
 
 ### Added
-- improve ingestion performance timing ([4909](https://github.com/stellar/go/issues/4909))
-- http archive requests include metrics ([5166](https://github.com/stellar/go/pull/5166))
-- Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
-- limit global flags displayed on cli help output ([5077](https://github.com/stellar/go/pull/5077))
+- We now include metrics for history archive requests ([5166](https://github.com/stellar/go/pull/5166))
+- Http history archive requests now include a unique user agent ([5166](https://github.com/stellar/go/pull/5166))
+- Added a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
 
 ### Breaking Changes
-- Removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update` , they were related to legacy non-captive core ingestion and are no longer usable. 
+- Removed configuration flags `--stellar-core-url-db`, `--cursor-name`, and `--skip-cursor-update`; they were related to legacy non-captive core ingestion and are no longer usable.
 
 ## 2.27.0
 
diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go
index 59a1a7c969..e0c667b033 100644
--- a/services/horizon/internal/ingest/fsm.go
+++ b/services/horizon/internal/ingest/fsm.go
@@ -590,6 +590,11 @@ func addHistoryArchiveStatsMetrics(s *system, stats []historyarchive.ArchiveStat
 				"source": historyServerStat.GetBackendName(),
 				"type":   "requests"}).
 			Add(float64(historyServerStat.GetRequests()))
+		s.Metrics().HistoryArchiveStatsCounter.
+			With(prometheus.Labels{
+				"source": historyServerStat.GetBackendName(),
+				"type":   "cache_hits"}).
+			Add(float64(historyServerStat.GetCacheHits()))
 	}
 }
 
diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go
index f064c4695c..ccf2e6786e 100644
--- a/services/horizon/internal/ingest/main.go
+++ b/services/horizon/internal/ingest/main.go
@@ -6,6 +6,7 @@ package ingest
 import (
 	"context"
 	"fmt"
+	"path"
 	"runtime"
 	"sync"
 	"time"
@@ -226,6 +227,11 @@ func NewSystem(config Config) (System, error) {
 			NetworkPassphrase:   config.NetworkPassphrase,
 			CheckpointFrequency: config.CheckpointFrequency,
 			UserAgent:           fmt.Sprintf("horizon/%s golang/%s", apkg.Version(), runtime.Version()),
+			CacheConfig: historyarchive.CacheOptions{
+				Cache:    true,
+				Path:     path.Join(config.CaptiveCoreStoragePath, "bucket-cache"),
+				MaxFiles: 50,
+			},
 		},
 	)
 	if err != nil {
diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go
index d989e7a9e5..f1f8b2ce2a 100644
--- a/services/horizon/internal/ingest/resume_state_test.go
+++ b/services/horizon/internal/ingest/resume_state_test.go
@@ -266,6 +266,7 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() {
 	mockStats.On("GetDownloads").Return(uint32(0))
 	mockStats.On("GetRequests").Return(uint32(0))
 	mockStats.On("GetUploads").Return(uint32(0))
+	mockStats.On("GetCacheHits").Return(uint32(0))
 	s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
 
 	s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).
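The new `cache_hits` label rides on the same `horizon_ingest_history_archive_stats_total` CounterVec defined earlier in this series. A self-contained sketch of that counter's behavior, reusing the definition from the diff above; the `source` label value here is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Mirrors the HistoryArchiveStatsCounter definition from the diff above.
	vec := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "horizon", Subsystem: "ingest", Name: "history_archive_stats_total",
		Help: "counters of different history archive stats",
	}, []string{"source", "type"})

	// This is the shape of what addHistoryArchiveStatsMetrics does per archive.
	labels := prometheus.Labels{"source": "https://archive.example.com", "type": "cache_hits"}
	vec.With(labels).Add(3)

	fmt.Println(testutil.ToFloat64(vec.With(labels))) // 3
}
```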
@@ -382,6 +383,7 @@ func (s *ResumeTestTestSuite) TestReapingObjectsDisabled() { mockStats.On("GetDownloads").Return(uint32(0)) mockStats.On("GetRequests").Return(uint32(0)) mockStats.On("GetUploads").Return(uint32(0)) + mockStats.On("GetCacheHits").Return(uint32(0)) s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once() // Reap lookup tables not executed @@ -431,6 +433,7 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() { mockStats.On("GetDownloads").Return(uint32(0)) mockStats.On("GetRequests").Return(uint32(0)) mockStats.On("GetUploads").Return(uint32(0)) + mockStats.On("GetCacheHits").Return(uint32(0)) s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once() next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) From a8b5c8ef068a8abebb152c19113b845c2abf1df3 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 22 Jan 2024 16:52:20 -0800 Subject: [PATCH 10/21] services/horizon: Bump the history archive cache size to increase hit rates (#5177) --- services/horizon/internal/ingest/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index ccf2e6786e..c264282faf 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -230,7 +230,7 @@ func NewSystem(config Config) (System, error) { CacheConfig: historyarchive.CacheOptions{ Cache: true, Path: path.Join(config.CaptiveCoreStoragePath, "bucket-cache"), - MaxFiles: 50, + MaxFiles: 150, }, }, ) From 0ddb36f4085f1164501b8d0081991a8abb13bae9 Mon Sep 17 00:00:00 2001 From: George Date: Mon, 22 Jan 2024 18:35:50 -0800 Subject: [PATCH 11/21] historyarchive: Make the library target the same log as Horizon (#5178) * Change logging provider to Horizon's default rather than logrus --- historyarchive/archive_cache.go | 10 +++++++--- services/horizon/internal/ingest/main.go | 1 + support/log/entry.go | 3 +-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/historyarchive/archive_cache.go b/historyarchive/archive_cache.go index a3029a7ae4..a3990428b0 100644 --- a/historyarchive/archive_cache.go +++ b/historyarchive/archive_cache.go @@ -6,7 +6,7 @@ import ( "path" lru "github.com/hashicorp/golang-lru" - log "github.com/sirupsen/logrus" + log "github.com/stellar/go/support/log" ) type CacheOptions struct { @@ -14,6 +14,7 @@ type CacheOptions struct { Path string MaxFiles uint + Log *log.Entry } type ArchiveBucketCache struct { @@ -25,8 +26,11 @@ type ArchiveBucketCache struct { // MakeArchiveBucketCache creates a cache on the disk at the given path that // acts as an LRU cache, mimicking a particular upstream. func MakeArchiveBucketCache(opts CacheOptions) (*ArchiveBucketCache, error) { - log_ := log. - WithField("subservice", "fs-cache"). + log_ := opts.Log + if opts.Log == nil { + log_ = log.WithField("subservice", "fs-cache") + } + log_ = log_. WithField("path", opts.Path). 
WithField("cap", opts.MaxFiles) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index c264282faf..3d8fcf2166 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -230,6 +230,7 @@ func NewSystem(config Config) (System, error) { CacheConfig: historyarchive.CacheOptions{ Cache: true, Path: path.Join(config.CaptiveCoreStoragePath, "bucket-cache"), + Log: log.WithField("subservice", "ha-cache"), MaxFiles: 150, }, }, diff --git a/support/log/entry.go b/support/log/entry.go index a4661cb8c0..9b3b596025 100644 --- a/support/log/entry.go +++ b/support/log/entry.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" gerr "github.com/go-errors/errors" "github.com/sirupsen/logrus" @@ -198,7 +197,7 @@ func (e *Entry) StartTest(level logrus.Level) func() []logrus.Entry { e.entry.Logger.AddHook(hook) old := e.entry.Logger.Out - e.entry.Logger.Out = ioutil.Discard + e.entry.Logger.Out = io.Discard oldLevel := e.entry.Logger.GetLevel() e.entry.Logger.SetLevel(level) From bfaf9e18b840d97e4d12afc3e2cd2bd56527642d Mon Sep 17 00:00:00 2001 From: urvisavla Date: Wed, 24 Jan 2024 17:17:23 -0800 Subject: [PATCH 12/21] services/horizon: Add DISABLE_SOROBAN_INGEST flag to skip soroban ingestion processing (#5176) --- .github/workflows/horizon.yml | 2 +- services/horizon/CHANGELOG.md | 5 + services/horizon/cmd/db.go | 1 + services/horizon/internal/config.go | 2 + services/horizon/internal/flags.go | 11 + services/horizon/internal/ingest/main.go | 2 + .../internal/ingest/processor_runner.go | 14 +- .../internal/ingest/processor_runner_test.go | 5 +- .../ingest/processors/effects_processor.go | 22 +- .../processors/effects_processor_test.go | 1 + .../ingest/processors/operations_processor.go | 52 +- .../processors/operations_processor_test.go | 60 ++ .../processors/transactions_processor.go | 29 +- .../processors/transactions_processor_test.go | 2 +- .../horizon/internal/ingest/verify_test.go | 4 +- services/horizon/internal/init.go | 1 + .../integration/invokehostfunction_test.go | 103 +++- .../horizon/internal/integration/sac_test.go | 553 +++++++++++------- 18 files changed, 608 insertions(+), 261 deletions(-) diff --git a/.github/workflows/horizon.yml b/.github/workflows/horizon.yml index 873fcd0243..e7bf35cb34 100644 --- a/.github/workflows/horizon.yml +++ b/.github/workflows/horizon.yml @@ -120,7 +120,7 @@ jobs: key: ${{ env.COMBINED_SOURCE_HASH }} - if: ${{ steps.horizon_binary_tests_hash.outputs.cache-hit != 'true' }} - run: go test -race -timeout 45m -v ./services/horizon/internal/integration/... + run: go test -race -timeout 65m -v ./services/horizon/internal/integration/... - name: Save Horizon binary and integration tests source hash to cache if: ${{ success() && steps.horizon_binary_tests_hash.outputs.cache-hit != 'true' }} diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index 25273f964c..0899744f94 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -17,6 +17,11 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). 
- We now include metrics for history archive requests ([5166](https://github.com/stellar/go/pull/5166))
 - Http history archive requests now include a unique user agent ([5166](https://github.com/stellar/go/pull/5166))
 - Added a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
+- New optional config `DISABLE_SOROBAN_INGEST` ([5175](https://github.com/stellar/go/issues/5175)). Defaults to `FALSE`; when `TRUE` and a soroban transaction is ingested, the following will occur:
+  * no effects will be generated for contract invocations.
+  * history_transactions.tx_meta column will have serialized xdr that equates to an empty `xdr.TransactionMeta.V3`: `Operations`, `TxChangesAfter`, and `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
+  * API transaction model for `result_meta_xdr` will have the same empty serialized xdr for `xdr.TransactionMeta.V3`: `Operations`, `TxChangesAfter`, and `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
+  * API `Operation` model for the `InvokeHostFunctionOp` type will have empty `asset_balance_changes`
 
 ### Breaking Changes
 - Removed configuration flags `--stellar-core-url-db`, `--cursor-name`, and `--skip-cursor-update`; they were related to legacy non-captive core ingestion and are no longer usable.
diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go
index 725df622b0..07bbf975fa 100644
--- a/services/horizon/cmd/db.go
+++ b/services/horizon/cmd/db.go
@@ -419,6 +419,7 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool,
 		RoundingSlippageFilter:   config.RoundingSlippageFilter,
 		EnableIngestionFiltering: config.EnableIngestionFiltering,
 		MaxLedgerPerFlush:        maxLedgersPerFlush,
+		SkipSorobanIngestion:     config.SkipSorobanIngestion,
 	}
 
 	if ingestConfig.HistorySession, err = db.Open("postgres", config.DatabaseURL); err != nil {
diff --git a/services/horizon/internal/config.go b/services/horizon/internal/config.go
index 7454f52bb7..8fb31075b8 100644
--- a/services/horizon/internal/config.go
+++ b/services/horizon/internal/config.go
@@ -108,4 +108,6 @@ type Config struct {
 	Network string
 	// DisableTxSub disables transaction submission functionality for Horizon.
 	DisableTxSub bool
+	// SkipSorobanIngestion skips Soroban related ingestion processing. 
+ SkipSorobanIngestion bool } diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go index 40bfc08afe..eb229c65b2 100644 --- a/services/horizon/internal/flags.go +++ b/services/horizon/internal/flags.go @@ -57,6 +57,8 @@ const ( EnableIngestionFilteringFlagName = "exp-enable-ingestion-filtering" // DisableTxSubFlagName is the command line flag for disabling transaction submission feature of Horizon DisableTxSubFlagName = "disable-tx-sub" + // SkipSorobanIngestionFlagName is the command line flag for disabling Soroban related ingestion processing + SkipSorobanIngestionFlagName = "disable-soroban-ingest" // StellarPubnet is a constant representing the Stellar public network StellarPubnet = "pubnet" @@ -730,6 +732,15 @@ func Flags() (*Config, support.ConfigOptions) { HistoryArchiveURLsFlagName, CaptiveCoreConfigPathName), UsedInCommands: IngestionCommands, }, + &support.ConfigOption{ + Name: SkipSorobanIngestionFlagName, + ConfigKey: &config.SkipSorobanIngestion, + OptType: types.Bool, + FlagDefault: false, + Required: false, + Usage: "excludes Soroban data during ingestion processing", + UsedInCommands: IngestionCommands, + }, } return config, flags diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 3d8fcf2166..7dfaea366e 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -107,6 +107,8 @@ type Config struct { EnableIngestionFiltering bool MaxLedgerPerFlush uint32 + + SkipSorobanIngestion bool } const ( diff --git a/services/horizon/internal/ingest/processor_runner.go b/services/horizon/internal/ingest/processor_runner.go index 34b977c03e..a09442b49d 100644 --- a/services/horizon/internal/ingest/processor_runner.go +++ b/services/horizon/internal/ingest/processor_runner.go @@ -111,6 +111,7 @@ func buildChangeProcessor( source ingestionSource, ledgerSequence uint32, networkPassphrase string, + skipSorobanIngestion bool, ) *groupChangeProcessors { statsChangeProcessor := &statsChangeProcessor{ StatsChangeProcessor: changeStats, @@ -144,13 +145,13 @@ func (s *ProcessorRunner) buildTransactionProcessor(ledgersProcessor *processors processors := []horizonTransactionProcessor{ statsLedgerTransactionProcessor, - processors.NewEffectProcessor(accountLoader, s.historyQ.NewEffectBatchInsertBuilder(), s.config.NetworkPassphrase), + processors.NewEffectProcessor(accountLoader, s.historyQ.NewEffectBatchInsertBuilder(), s.config.NetworkPassphrase, s.config.SkipSorobanIngestion), ledgersProcessor, - processors.NewOperationProcessor(s.historyQ.NewOperationBatchInsertBuilder(), s.config.NetworkPassphrase), + processors.NewOperationProcessor(s.historyQ.NewOperationBatchInsertBuilder(), s.config.NetworkPassphrase, s.config.SkipSorobanIngestion), tradeProcessor, processors.NewParticipantsProcessor(accountLoader, s.historyQ.NewTransactionParticipantsBatchInsertBuilder(), s.historyQ.NewOperationParticipantBatchInsertBuilder()), - processors.NewTransactionProcessor(s.historyQ.NewTransactionBatchInsertBuilder()), + processors.NewTransactionProcessor(s.historyQ.NewTransactionBatchInsertBuilder(), s.config.SkipSorobanIngestion), processors.NewClaimableBalancesTransactionProcessor(cbLoader, s.historyQ.NewTransactionClaimableBalanceBatchInsertBuilder(), s.historyQ.NewOperationClaimableBalanceBatchInsertBuilder()), processors.NewLiquidityPoolsTransactionProcessor(lpLoader, @@ -172,7 +173,10 @@ func (s *ProcessorRunner) buildFilteredOutProcessor() *groupTransactionProcessor // when in 
online mode, the submission result processor must always run (regardless of filtering) var p []horizonTransactionProcessor if s.config.EnableIngestionFiltering { - txSubProc := processors.NewTransactionFilteredTmpProcessor(s.historyQ.NewTransactionFilteredTmpBatchInsertBuilder()) + txSubProc := processors.NewTransactionFilteredTmpProcessor( + s.historyQ.NewTransactionFilteredTmpBatchInsertBuilder(), + s.config.SkipSorobanIngestion, + ) p = append(p, txSubProc) } @@ -235,6 +239,7 @@ func (s *ProcessorRunner) RunHistoryArchiveIngestion( historyArchiveSource, checkpointLedger, s.config.NetworkPassphrase, + s.config.SkipSorobanIngestion, ) if checkpointLedger == 1 { @@ -493,6 +498,7 @@ func (s *ProcessorRunner) RunAllProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( ledgerSource, ledger.LedgerSequence(), s.config.NetworkPassphrase, + s.config.SkipSorobanIngestion, ) err = s.runChangeProcessorOnLedger(groupChangeProcessors, ledger) if err != nil { diff --git a/services/horizon/internal/ingest/processor_runner_test.go b/services/horizon/internal/ingest/processor_runner_test.go index eaeca95661..ddac48aa82 100644 --- a/services/horizon/internal/ingest/processor_runner_test.go +++ b/services/horizon/internal/ingest/processor_runner_test.go @@ -180,7 +180,7 @@ func TestProcessorRunnerBuildChangeProcessor(t *testing.T) { } stats := &ingest.StatsChangeProcessor{} - processor := buildChangeProcessor(runner.historyQ, stats, ledgerSource, 123, "") + processor := buildChangeProcessor(runner.historyQ, stats, ledgerSource, 123, "", false) assert.IsType(t, &groupChangeProcessors{}, processor) assert.IsType(t, &statsChangeProcessor{}, processor.processors[0]) @@ -201,7 +201,7 @@ func TestProcessorRunnerBuildChangeProcessor(t *testing.T) { filters: &MockFilters{}, } - processor = buildChangeProcessor(runner.historyQ, stats, historyArchiveSource, 456, "") + processor = buildChangeProcessor(runner.historyQ, stats, historyArchiveSource, 456, "", false) assert.IsType(t, &groupChangeProcessors{}, processor) assert.IsType(t, &statsChangeProcessor{}, processor.processors[0]) @@ -271,6 +271,7 @@ func TestProcessorRunnerWithFilterEnabled(t *testing.T) { config := Config{ NetworkPassphrase: network.PublicNetworkPassphrase, EnableIngestionFiltering: true, + SkipSorobanIngestion: false, } q := &mockDBQ{} diff --git a/services/horizon/internal/ingest/processors/effects_processor.go b/services/horizon/internal/ingest/processors/effects_processor.go index 34e9f9169a..830632f5f5 100644 --- a/services/horizon/internal/ingest/processors/effects_processor.go +++ b/services/horizon/internal/ingest/processors/effects_processor.go @@ -28,17 +28,20 @@ type EffectProcessor struct { accountLoader *history.AccountLoader batch history.EffectBatchInsertBuilder network string + skipSoroban bool } func NewEffectProcessor( accountLoader *history.AccountLoader, batch history.EffectBatchInsertBuilder, network string, + skipSoroban bool, ) *EffectProcessor { return &EffectProcessor{ accountLoader: accountLoader, batch: batch, network: network, + skipSoroban: skipSoroban, } } @@ -50,14 +53,29 @@ func (p *EffectProcessor) ProcessTransaction( return nil } - for opi, op := range transaction.Envelope.Operations() { + elidedTransaction := transaction + + if p.skipSoroban && + elidedTransaction.UnsafeMeta.V == 3 && + elidedTransaction.UnsafeMeta.V3.SorobanMeta != nil { + elidedTransaction.UnsafeMeta.V3 = &xdr.TransactionMetaV3{ + Ext: xdr.ExtensionPoint{}, + TxChangesBefore: xdr.LedgerEntryChanges{}, + Operations: []xdr.OperationMeta{}, + 
TxChangesAfter: xdr.LedgerEntryChanges{}, + SorobanMeta: nil, + } + } + + for opi, op := range elidedTransaction.Envelope.Operations() { operation := transactionOperationWrapper{ index: uint32(opi), - transaction: transaction, + transaction: elidedTransaction, operation: op, ledgerSequence: uint32(lcm.LedgerSequence()), network: p.network, } + if err := operation.ingestEffects(p.accountLoader, p.batch); err != nil { return errors.Wrapf(err, "reading operation %v effects", operation.ID()) } diff --git a/services/horizon/internal/ingest/processors/effects_processor_test.go b/services/horizon/internal/ingest/processors/effects_processor_test.go index 0243768fde..70af21737a 100644 --- a/services/horizon/internal/ingest/processors/effects_processor_test.go +++ b/services/horizon/internal/ingest/processors/effects_processor_test.go @@ -143,6 +143,7 @@ func (s *EffectsProcessorTestSuiteLedger) SetupTest() { s.accountLoader, s.mockBatchInsertBuilder, networkPassphrase, + false, ) s.txs = []ingest.LedgerTransaction{ diff --git a/services/horizon/internal/ingest/processors/operations_processor.go b/services/horizon/internal/ingest/processors/operations_processor.go index b9a23229d5..84ef45f049 100644 --- a/services/horizon/internal/ingest/processors/operations_processor.go +++ b/services/horizon/internal/ingest/processors/operations_processor.go @@ -21,14 +21,16 @@ import ( // OperationProcessor operations processor type OperationProcessor struct { - batch history.OperationBatchInsertBuilder - network string + batch history.OperationBatchInsertBuilder + network string + skipSoroban bool } -func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network string) *OperationProcessor { +func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network string, skipSoroban bool) *OperationProcessor { return &OperationProcessor{ - batch: batch, - network: network, + batch: batch, + network: network, + skipSoroban: skipSoroban, } } @@ -36,11 +38,12 @@ func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network st func (p *OperationProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { for i, op := range transaction.Envelope.Operations() { operation := transactionOperationWrapper{ - index: uint32(i), - transaction: transaction, - operation: op, - ledgerSequence: lcm.LedgerSequence(), - network: p.network, + index: uint32(i), + transaction: transaction, + operation: op, + ledgerSequence: lcm.LedgerSequence(), + network: p.network, + skipSorobanDetails: p.skipSoroban, } details, err := operation.Details() if err != nil { @@ -81,11 +84,12 @@ func (p *OperationProcessor) Flush(ctx context.Context, session db.SessionInterf // transactionOperationWrapper represents the data for a single operation within a transaction type transactionOperationWrapper struct { - index uint32 - transaction ingest.LedgerTransaction - operation xdr.Operation - ledgerSequence uint32 - network string + index uint32 + transaction ingest.LedgerTransaction + operation xdr.Operation + ledgerSequence uint32 + network string + skipSorobanDetails bool } // ID returns the ID for the operation. @@ -265,6 +269,11 @@ func (operation *transactionOperationWrapper) IsPayment() bool { case xdr.OperationTypeAccountMerge: return true case xdr.OperationTypeInvokeHostFunction: + // #5175, may want to consider skipping this parsing of payment from contracts + // as part of eliding soroban ingestion aspects when DISABLE_SOROBAN_INGEST. 
+ // but, may cause inconsistencies that aren't worth the gain, + // as payments won't be thoroughly accurate, i.e. a payment could have + // happened within a contract invoke. diagnosticEvents, err := operation.transaction.GetDiagnosticEvents() if err != nil { return false @@ -688,11 +697,18 @@ func (operation *transactionOperationWrapper) Details() (map[string]interface{}, } details["parameters"] = params - if balanceChanges, err := operation.parseAssetBalanceChangesFromContractEvents(); err != nil { - return nil, err + var balanceChanges []map[string]interface{} + var parseErr error + if operation.skipSorobanDetails { + // https://github.com/stellar/go/issues/5175 + // intentionally toggle off parsing soroban meta into "asset_balance_changes" + balanceChanges = make([]map[string]interface{}, 0) } else { - details["asset_balance_changes"] = balanceChanges + if balanceChanges, parseErr = operation.parseAssetBalanceChangesFromContractEvents(); parseErr != nil { + return nil, parseErr + } } + details["asset_balance_changes"] = balanceChanges case xdr.HostFunctionTypeHostFunctionTypeCreateContract: args := op.HostFunction.MustCreateContract() diff --git a/services/horizon/internal/ingest/processors/operations_processor_test.go b/services/horizon/internal/ingest/processors/operations_processor_test.go index 4b5fb376cd..275a6056e4 100644 --- a/services/horizon/internal/ingest/processors/operations_processor_test.go +++ b/services/horizon/internal/ingest/processors/operations_processor_test.go @@ -42,6 +42,7 @@ func (s *OperationsProcessorTestSuiteLedger) SetupTest() { s.processor = NewOperationProcessor( s.mockBatchInsertBuilder, "test network", + false, ) } @@ -375,6 +376,65 @@ func (s *OperationsProcessorTestSuiteLedger) TestOperationTypeInvokeHostFunction } s.Assert().Equal(found, 4, "should have one balance changed record for each of mint, burn, clawback, transfer") }) + + s.T().Run("InvokeContractAssetBalancesElidedFromDetails", func(t *testing.T) { + randomIssuer := keypair.MustRandom() + randomAsset := xdr.MustNewCreditAsset("TESTING", randomIssuer.Address()) + passphrase := "passphrase" + randomAccount := keypair.MustRandom().Address() + contractId := [32]byte{} + zeroContractStrKey, err := strkey.Encode(strkey.VersionByteContract, contractId[:]) + s.Assert().NoError(err) + + transferContractEvent := contractevents.GenerateEvent(contractevents.EventTypeTransfer, randomAccount, zeroContractStrKey, "", randomAsset, big.NewInt(10000000), passphrase) + burnContractEvent := contractevents.GenerateEvent(contractevents.EventTypeBurn, zeroContractStrKey, "", "", randomAsset, big.NewInt(10000000), passphrase) + mintContractEvent := contractevents.GenerateEvent(contractevents.EventTypeMint, "", zeroContractStrKey, randomAccount, randomAsset, big.NewInt(10000000), passphrase) + clawbackContractEvent := contractevents.GenerateEvent(contractevents.EventTypeClawback, zeroContractStrKey, "", randomAccount, randomAsset, big.NewInt(10000000), passphrase) + + tx = ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 3, + V3: &xdr.TransactionMetaV3{ + SorobanMeta: &xdr.SorobanTransactionMeta{ + Events: []xdr.ContractEvent{ + transferContractEvent, + burnContractEvent, + mintContractEvent, + clawbackContractEvent, + }, + }, + }, + }, + } + wrapper := transactionOperationWrapper{ + skipSorobanDetails: true, + transaction: tx, + operation: xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeInvokeHostFunction, + InvokeHostFunctionOp: 
&xdr.InvokeHostFunctionOp{ + HostFunction: xdr.HostFunction{ + Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract, + InvokeContract: &xdr.InvokeContractArgs{ + ContractAddress: xdr.ScAddress{ + Type: xdr.ScAddressTypeScAddressTypeContract, + ContractId: &xdr.Hash{0x1, 0x2}, + }, + FunctionName: "foo", + Args: xdr.ScVec{}, + }, + }, + }, + }, + }, + network: passphrase, + } + + details, err := wrapper.Details() + s.Assert().NoError(err) + s.Assert().Len(details["asset_balance_changes"], 0, "for invokehostfn op, no asset balances should be in details when skip soroban is enabled") + }) } func (s *OperationsProcessorTestSuiteLedger) assertInvokeHostFunctionParameter(parameters []map[string]string, paramPosition int, expectedType string, expectedVal xdr.ScVal) { diff --git a/services/horizon/internal/ingest/processors/transactions_processor.go b/services/horizon/internal/ingest/processors/transactions_processor.go index 871c72624a..b82934d86a 100644 --- a/services/horizon/internal/ingest/processors/transactions_processor.go +++ b/services/horizon/internal/ingest/processors/transactions_processor.go @@ -11,23 +11,40 @@ import ( ) type TransactionProcessor struct { - batch history.TransactionBatchInsertBuilder + batch history.TransactionBatchInsertBuilder + skipSoroban bool } -func NewTransactionFilteredTmpProcessor(batch history.TransactionBatchInsertBuilder) *TransactionProcessor { +func NewTransactionFilteredTmpProcessor(batch history.TransactionBatchInsertBuilder, skipSoroban bool) *TransactionProcessor { return &TransactionProcessor{ - batch: batch, + batch: batch, + skipSoroban: skipSoroban, } } -func NewTransactionProcessor(batch history.TransactionBatchInsertBuilder) *TransactionProcessor { +func NewTransactionProcessor(batch history.TransactionBatchInsertBuilder, skipSoroban bool) *TransactionProcessor { return &TransactionProcessor{ - batch: batch, + batch: batch, + skipSoroban: skipSoroban, } } func (p *TransactionProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { - if err := p.batch.Add(transaction, lcm.LedgerSequence()); err != nil { + elidedTransaction := transaction + + if p.skipSoroban && + elidedTransaction.UnsafeMeta.V == 3 && + elidedTransaction.UnsafeMeta.MustV3().SorobanMeta != nil { + elidedTransaction.UnsafeMeta.V3 = &xdr.TransactionMetaV3{ + Ext: xdr.ExtensionPoint{}, + TxChangesBefore: xdr.LedgerEntryChanges{}, + Operations: []xdr.OperationMeta{}, + TxChangesAfter: xdr.LedgerEntryChanges{}, + SorobanMeta: nil, + } + } + + if err := p.batch.Add(elidedTransaction, lcm.LedgerSequence()); err != nil { return errors.Wrap(err, "Error batch inserting transaction rows") } diff --git a/services/horizon/internal/ingest/processors/transactions_processor_test.go b/services/horizon/internal/ingest/processors/transactions_processor_test.go index 987e8ce6f9..873a72af05 100644 --- a/services/horizon/internal/ingest/processors/transactions_processor_test.go +++ b/services/horizon/internal/ingest/processors/transactions_processor_test.go @@ -29,7 +29,7 @@ func TestTransactionsProcessorTestSuiteLedger(t *testing.T) { func (s *TransactionsProcessorTestSuiteLedger) SetupTest() { s.ctx = context.Background() s.mockBatchInsertBuilder = &history.MockTransactionsBatchInsertBuilder{} - s.processor = NewTransactionProcessor(s.mockBatchInsertBuilder) + s.processor = NewTransactionProcessor(s.mockBatchInsertBuilder, false) } func (s *TransactionsProcessorTestSuiteLedger) TearDownTest() { diff --git 
a/services/horizon/internal/ingest/verify_test.go b/services/horizon/internal/ingest/verify_test.go index 901f21a0ca..e3c0e4ec56 100644 --- a/services/horizon/internal/ingest/verify_test.go +++ b/services/horizon/internal/ingest/verify_test.go @@ -292,7 +292,7 @@ func TestStateVerifierLockBusy(t *testing.T) { tt.Assert.NoError(q.BeginTx(tt.Ctx, &sql.TxOptions{})) checkpointLedger := uint32(63) - changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "") + changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "", false) gen := randxdr.NewGenerator() var changes []xdr.LedgerEntryChange @@ -350,7 +350,7 @@ func TestStateVerifier(t *testing.T) { ledger := rand.Int31() checkpointLedger := uint32(ledger - (ledger % 64) - 1) - changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "") + changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "", false) mockChangeReader := &ingest.MockChangeReader{} gen := randxdr.NewGenerator() diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go index 1b6664b8ba..4078c7ad00 100644 --- a/services/horizon/internal/init.go +++ b/services/horizon/internal/init.go @@ -110,6 +110,7 @@ func initIngester(app *App) { EnableExtendedLogLedgerStats: app.config.IngestEnableExtendedLogLedgerStats, RoundingSlippageFilter: app.config.RoundingSlippageFilter, EnableIngestionFiltering: app.config.EnableIngestionFiltering, + SkipSorobanIngestion: app.config.SkipSorobanIngestion, }) if err != nil { diff --git a/services/horizon/internal/integration/invokehostfunction_test.go b/services/horizon/internal/integration/invokehostfunction_test.go index 275f0de23b..1b1edc091a 100644 --- a/services/horizon/internal/integration/invokehostfunction_test.go +++ b/services/horizon/internal/integration/invokehostfunction_test.go @@ -3,11 +3,13 @@ package integration import ( "crypto/sha256" "encoding/hex" + "fmt" "os" "path/filepath" "testing" "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/protocols/horizon" "github.com/stellar/go/protocols/horizon/operations" "github.com/stellar/go/services/horizon/internal/test/integration" "github.com/stellar/go/txnbuild" @@ -24,13 +26,42 @@ const increment_contract = "soroban_increment_contract.wasm" // Refer to ./services/horizon/internal/integration/contracts/README.md on how to recompile // contract code if needed to new wasm. -func TestContractInvokeHostFunctionInstallContract(t *testing.T) { +func TestInvokeHostFns(t *testing.T) { + // first test contracts when soroban processing is enabled + DisabledSoroban = false + runAllTests(t) + // now test same contracts when soroban processing is disabled + DisabledSoroban = true + runAllTests(t) +} + +func runAllTests(t *testing.T) { + tests := []struct { + name string + fn func(*testing.T) + }{ + {"CaseContractInvokeHostFunctionInstallContract", CaseContractInvokeHostFunctionInstallContract}, + {"CaseContractInvokeHostFunctionCreateContractByAddress", CaseContractInvokeHostFunctionCreateContractByAddress}, + {"CaseContractInvokeHostFunctionInvokeStatelessContractFn", CaseContractInvokeHostFunctionInvokeStatelessContractFn}, + {"CaseContractInvokeHostFunctionInvokeStatefulContractFn", CaseContractInvokeHostFunctionInvokeStatefulContractFn}, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("Soroban Processing Disabled = %v. 
", DisabledSoroban)+tt.name, func(t *testing.T) { + tt.fn(t) + }) + } +} + +func CaseContractInvokeHostFunctionInstallContract(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -46,6 +77,7 @@ func TestContractInvokeHostFunctionInstallContract(t *testing.T) { clientTx, err := itest.Client().TransactionDetail(tx.Hash) require.NoError(t, err) + verifySorobanMeta(t, clientTx) assert.Equal(t, tx.Hash, clientTx.Hash) var txResult xdr.TransactionResult @@ -71,16 +103,17 @@ func TestContractInvokeHostFunctionInstallContract(t *testing.T) { invokeHostFunctionOpJson, ok := clientInvokeOp.Embedded.Records[0].(operations.InvokeHostFunction) assert.True(t, ok) assert.Equal(t, invokeHostFunctionOpJson.Function, "HostFunctionTypeHostFunctionTypeUploadContractWasm") - } -func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) { +func CaseContractInvokeHostFunctionCreateContractByAddress(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -103,6 +136,7 @@ func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) { clientTx, err := itest.Client().TransactionDetail(tx.Hash) require.NoError(t, err) + verifySorobanMeta(t, clientTx) assert.Equal(t, tx.Hash, clientTx.Hash) var txResult xdr.TransactionResult @@ -128,13 +162,15 @@ func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) { assert.Equal(t, invokeHostFunctionOpJson.Salt, "110986164698320180327942133831752629430491002266485370052238869825166557303060") } -func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) { +func CaseContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -196,6 +232,7 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) { clientTx, err := itest.Client().TransactionDetail(tx.Hash) require.NoError(t, err) + verifySorobanMeta(t, clientTx) assert.Equal(t, tx.Hash, clientTx.Hash) var txResult xdr.TransactionResult @@ -209,12 +246,14 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) { assert.True(t, ok) assert.Equal(t, invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess) - // check the function response, should have summed the two input numbers - invokeResult := xdr.Uint64(9) - expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU64, U64: &invokeResult} - var transactionMeta xdr.TransactionMeta - assert.NoError(t, xdr.SafeUnmarshalBase64(tx.ResultMetaXdr, &transactionMeta)) - assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue)) + if !DisabledSoroban { + // check the function response, should have 
summed the two input numbers + invokeResult := xdr.Uint64(9) + expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU64, U64: &invokeResult} + var transactionMeta xdr.TransactionMeta + assert.NoError(t, xdr.SafeUnmarshalBase64(tx.ResultMetaXdr, &transactionMeta)) + assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue)) + } clientInvokeOp, err := itest.Client().Operations(horizonclient.OperationRequest{ ForTransaction: tx.Hash, @@ -237,13 +276,15 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) { assert.Equal(t, invokeHostFunctionOpJson.Parameters[3].Type, "U64") } -func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) { +func CaseContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -292,6 +333,7 @@ func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) { clientTx, err := itest.Client().TransactionDetail(tx.Hash) require.NoError(t, err) + verifySorobanMeta(t, clientTx) assert.Equal(t, tx.Hash, clientTx.Hash) var txResult xdr.TransactionResult @@ -305,12 +347,14 @@ func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) { assert.True(t, ok) assert.Equal(t, invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess) - // check the function response, should have incremented state from 0 to 1 - invokeResult := xdr.Uint32(1) - expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU32, U32: &invokeResult} - var transactionMeta xdr.TransactionMeta - assert.NoError(t, xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &transactionMeta)) - assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue)) + if !DisabledSoroban { + // check the function response, should have incremented state from 0 to 1 + invokeResult := xdr.Uint32(1) + expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU32, U32: &invokeResult} + var transactionMeta xdr.TransactionMeta + assert.NoError(t, xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &transactionMeta)) + assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue)) + } clientInvokeOp, err := itest.Client().Operations(horizonclient.OperationRequest{ ForTransaction: tx.Hash, @@ -384,3 +428,20 @@ func assembleCreateContractOp(t *testing.T, sourceAccount string, wasmFileName s SourceAccount: sourceAccount, } } + +func verifySorobanMeta(t *testing.T, clientTx horizon.Transaction) { + var txMeta xdr.TransactionMeta + err := xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMeta) + require.NoError(t, err) + require.NotNil(t, txMeta.V3) + + if !DisabledSoroban { + require.NotNil(t, txMeta.V3.SorobanMeta) + return + } + + require.Empty(t, txMeta.V3.Operations) + require.Empty(t, txMeta.V3.TxChangesAfter) + require.Empty(t, txMeta.V3.TxChangesBefore) + require.Nil(t, txMeta.V3.SorobanMeta) +} diff --git a/services/horizon/internal/integration/sac_test.go b/services/horizon/internal/integration/sac_test.go index 64c772b44c..c790b5a54c 100644 --- a/services/horizon/internal/integration/sac_test.go +++ b/services/horizon/internal/integration/sac_test.go @@ -2,6 +2,7 @@ package integration import ( "context" + "fmt" "math" "math/big" "strings" @@ -30,19 +31,127 @@ 
const sac_contract = "soroban_sac_test.wasm" // of the integration tests. const LongTermTTL = 10000 +var ( + DisabledSoroban bool +) + +func TestSAC(t *testing.T) { + // first test contracts when soroban processing is enabled + DisabledSoroban = false + runAllSACTests(t) + // now test same contracts when soroban processing is disabled + DisabledSoroban = true + runAllSACTests(t) +} + +func runAllSACTests(t *testing.T) { + tests := []struct { + name string + fn func(*testing.T) + }{ + {"CaseContractMintToAccount", CaseContractMintToAccount}, + {"CaseContractMintToContract", CaseContractMintToContract}, + {"CaseExpirationAndRestoration", CaseExpirationAndRestoration}, + {"CaseContractTransferBetweenAccounts", CaseContractTransferBetweenAccounts}, + {"CaseContractTransferBetweenAccountAndContract", CaseContractTransferBetweenAccountAndContract}, + {"CaseContractTransferBetweenContracts", CaseContractTransferBetweenContracts}, + {"CaseContractBurnFromAccount", CaseContractBurnFromAccount}, + {"CaseContractBurnFromContract", CaseContractBurnFromContract}, + {"CaseContractClawbackFromAccount", CaseContractClawbackFromAccount}, + {"CaseContractClawbackFromContract", CaseContractClawbackFromContract}, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("Soroban Processing Disabled = %v. ", DisabledSoroban)+tt.name, func(t *testing.T) { + tt.fn(t) + }) + } +} + // Tests use precompiled wasm bin files that are added to the testdata directory. // Refer to ./services/horizon/internal/integration/contracts/README.md on how to recompile // contract code if needed to new wasm. -func TestContractMintToAccount(t *testing.T) { +func createSAC(itest *integration.Test, asset xdr.Asset) { + invokeHostFunction := &txnbuild.InvokeHostFunction{ + HostFunction: xdr.HostFunction{ + Type: xdr.HostFunctionTypeHostFunctionTypeCreateContract, + CreateContract: &xdr.CreateContractArgs{ + ContractIdPreimage: xdr.ContractIdPreimage{ + Type: xdr.ContractIdPreimageTypeContractIdPreimageFromAsset, + FromAsset: &asset, + }, + Executable: xdr.ContractExecutable{ + Type: xdr.ContractExecutableTypeContractExecutableStellarAsset, + WasmHash: nil, + }, + }, + }, + SourceAccount: itest.Master().Address(), + } + _, _, preFlightOp := assertInvokeHostFnSucceeds(itest, itest.Master(), invokeHostFunction) + sourceAccount, extendTTLOp, minFee := itest.PreflightExtendExpiration( + itest.Master().Address(), + preFlightOp.Ext.SorobanData.Resources.Footprint.ReadWrite, + LongTermTTL, + ) + itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp) +} + +func invokeStoreSet( + itest *integration.Test, + storeContractID xdr.Hash, + ledgerEntryData xdr.LedgerEntryData, +) *txnbuild.InvokeHostFunction { + key := ledgerEntryData.MustContractData().Key + val := ledgerEntryData.MustContractData().Val + return &txnbuild.InvokeHostFunction{ + HostFunction: xdr.HostFunction{ + Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract, + InvokeContract: &xdr.InvokeContractArgs{ + ContractAddress: contractIDParam(storeContractID), + FunctionName: "set", + Args: xdr.ScVec{ + key, + val, + }, + }, + }, + SourceAccount: itest.Master().Address(), + } +} + +func invokeStoreRemove( + itest *integration.Test, + storeContractID xdr.Hash, + ledgerKey xdr.LedgerKey, +) *txnbuild.InvokeHostFunction { + return &txnbuild.InvokeHostFunction{ + HostFunction: xdr.HostFunction{ + Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract, + InvokeContract: &xdr.InvokeContractArgs{ + ContractAddress: 
contractIDParam(storeContractID), + FunctionName: "remove", + Args: xdr.ScVec{ + ledgerKey.MustContractData().Key, + }, + }, + }, + SourceAccount: itest.Master().Address(), + } +} + +func CaseContractMintToAccount(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, - HorizonEnvironment: map[string]string{"INGEST_DISABLE_STATE_VERIFICATION": "true", "CONNECTION_TIMEOUT": "360000"}, - EnableSorobanRPC: true, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban), + }, + EnableSorobanRPC: true, }) issuer := itest.Master().Address() @@ -72,17 +181,22 @@ func TestContractMintToAccount(t *testing.T) { balanceContracts: big.NewInt(0), contractID: stellarAssetContractID(itest, asset), }) - - fx := getTxEffects(itest, mintTx, asset) - require.Len(t, fx, 1) - creditEffect := assertContainsEffect(t, fx, - effects.EffectAccountCredited)[0].(effects.AccountCredited) - assert.Equal(t, recipientKp.Address(), creditEffect.Account) - assert.Equal(t, issuer, creditEffect.Asset.Issuer) - assert.Equal(t, code, creditEffect.Asset.Code) - assert.Equal(t, "20.0000000", creditEffect.Amount) assertEventPayments(itest, mintTx, asset, "", recipient.GetAccountID(), "mint", "20.0000000") + if !DisabledSoroban { + fx := getTxEffects(itest, mintTx, asset) + require.Len(t, fx, 1) + creditEffect := assertContainsEffect(t, fx, + effects.EffectAccountCredited)[0].(effects.AccountCredited) + assert.Equal(t, recipientKp.Address(), creditEffect.Account) + assert.Equal(t, issuer, creditEffect.Asset.Issuer) + assert.Equal(t, code, creditEffect.Asset.Code) + assert.Equal(t, "20.0000000", creditEffect.Amount) + } else { + fx := getTxEffects(itest, mintTx, asset) + require.Len(t, fx, 0) + } + otherRecipientKp, otherRecipient := itest.CreateAccount("100") itest.MustEstablishTrustline(otherRecipientKp, otherRecipient, txnbuild.MustAssetFromXDR(asset)) @@ -94,12 +208,6 @@ func TestContractMintToAccount(t *testing.T) { ) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("20")) assertContainsBalance(itest, otherRecipientKp, issuer, code, amount.MustParse("30")) - - fx = getTxEffects(itest, transferTx, asset) - assert.Len(t, fx, 2) - assertContainsEffect(t, fx, - effects.EffectAccountCredited, - effects.EffectAccountDebited) assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -111,41 +219,28 @@ func TestContractMintToAccount(t *testing.T) { balanceContracts: big.NewInt(0), contractID: stellarAssetContractID(itest, asset), }) -} -func createSAC(itest *integration.Test, asset xdr.Asset) { - invokeHostFunction := &txnbuild.InvokeHostFunction{ - HostFunction: xdr.HostFunction{ - Type: xdr.HostFunctionTypeHostFunctionTypeCreateContract, - CreateContract: &xdr.CreateContractArgs{ - ContractIdPreimage: xdr.ContractIdPreimage{ - Type: xdr.ContractIdPreimageTypeContractIdPreimageFromAsset, - FromAsset: &asset, - }, - Executable: xdr.ContractExecutable{ - Type: xdr.ContractExecutableTypeContractExecutableStellarAsset, - WasmHash: nil, - }, - }, - }, - SourceAccount: itest.Master().Address(), + if !DisabledSoroban { + fx := getTxEffects(itest, transferTx, asset) + assert.Len(t, fx, 2) + assertContainsEffect(t, fx, + effects.EffectAccountCredited, + effects.EffectAccountDebited) + } else { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) } - _, _, preFlightOp := 
assertInvokeHostFnSucceeds(itest, itest.Master(), invokeHostFunction) - sourceAccount, extendTTLOp, minFee := itest.PreflightExtendExpiration( - itest.Master().Address(), - preFlightOp.Ext.SorobanData.Resources.Footprint.ReadWrite, - LongTermTTL, - ) - itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp) } -func TestContractMintToContract(t *testing.T) { +func CaseContractMintToContract(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -170,19 +265,25 @@ func TestContractMintToContract(t *testing.T) { i128Param(int64(mintAmount.Hi), uint64(mintAmount.Lo)), contractAddressParam(recipientContractID)), ) - assertContainsEffect(t, getTxEffects(itest, mintTx, asset), - effects.EffectContractCredited) - balanceAmount, _, _ := assertInvokeHostFnSucceeds( - itest, - itest.Master(), - contractBalance(itest, issuer, asset, recipientContractID), - ) - assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) - assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64-3), (*balanceAmount.I128).Lo) - assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi) assertEventPayments(itest, mintTx, asset, "", strkeyRecipientContractID, "mint", amount.String128(mintAmount)) + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, mintTx, asset), + effects.EffectContractCredited) + + balanceAmount, _, _ := assertInvokeHostFnSucceeds( + itest, + itest.Master(), + contractBalance(itest, issuer, asset, recipientContractID), + ) + assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) + assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64-3), (*balanceAmount.I128).Lo) + assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi) + } else { + fx := getTxEffects(itest, mintTx, asset) + require.Len(t, fx, 0) + } // calling transfer from the issuer account will also mint the asset _, transferTx, _ := assertInvokeHostFnSucceeds( itest, @@ -190,19 +291,6 @@ func TestContractMintToContract(t *testing.T) { transferWithAmount(itest, issuer, asset, i128Param(0, 3), contractAddressParam(recipientContractID)), ) - assertContainsEffect(t, getTxEffects(itest, transferTx, asset), - effects.EffectAccountDebited, - effects.EffectContractCredited) - - balanceAmount, _, _ = assertInvokeHostFnSucceeds( - itest, - itest.Master(), - contractBalance(itest, issuer, asset, recipientContractID), - ) - assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) - assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64), (*balanceAmount.I128).Lo) - assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi) - // 2^127 - 1 balanceContracts := new(big.Int).Lsh(big.NewInt(1), 127) balanceContracts.Sub(balanceContracts, big.NewInt(1)) @@ -217,9 +305,27 @@ func TestContractMintToContract(t *testing.T) { balanceContracts: balanceContracts, contractID: stellarAssetContractID(itest, asset), }) + + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, transferTx, asset), + effects.EffectAccountDebited, + effects.EffectContractCredited) + + balanceAmount, _, _ := assertInvokeHostFnSucceeds( + itest, + itest.Master(), + 
contractBalance(itest, issuer, asset, recipientContractID), + ) + assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) + assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64), (*balanceAmount.I128).Lo) + assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi) + } else { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) + } } -func TestExpirationAndRestoration(t *testing.T) { +func CaseExpirationAndRestoration(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } @@ -232,6 +338,7 @@ func TestExpirationAndRestoration(t *testing.T) { // a fake asset contract in the horizon db and we don't // want state verification to detect this "ingest-disable-state-verification": "true", + "disable-soroban-ingest": fmt.Sprint(DisabledSoroban), }, }) @@ -294,6 +401,7 @@ func TestExpirationAndRestoration(t *testing.T) { LongTermTTL, ) itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -321,6 +429,16 @@ func TestExpirationAndRestoration(t *testing.T) { balanceToExpire, ), ) + + balanceToExpireLedgerKey := xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeContractData, + ContractData: &xdr.LedgerKeyContractData{ + Contract: balanceToExpire.ContractData.Contract, + Key: balanceToExpire.ContractData.Key, + Durability: balanceToExpire.ContractData.Durability, + }, + } + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -333,14 +451,6 @@ func TestExpirationAndRestoration(t *testing.T) { contractID: storeContractID, }) - balanceToExpireLedgerKey := xdr.LedgerKey{ - Type: xdr.LedgerEntryTypeContractData, - ContractData: &xdr.LedgerKeyContractData{ - Contract: balanceToExpire.ContractData.Contract, - Key: balanceToExpire.ContractData.Key, - Durability: balanceToExpire.ContractData.Durability, - }, - } // The TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME=10 configuration in stellar-core // will ensure that the ledger entry expires after 10 ledgers. 
// Because ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING is set to true, 10 ledgers @@ -372,6 +482,7 @@ func TestExpirationAndRestoration(t *testing.T) { ), ), ) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -390,6 +501,7 @@ func TestExpirationAndRestoration(t *testing.T) { balanceToExpireLedgerKey, ) itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &restoreFootprint) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -419,6 +531,7 @@ func TestExpirationAndRestoration(t *testing.T) { ), ), ) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -444,6 +557,7 @@ func TestExpirationAndRestoration(t *testing.T) { ), ), ) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -457,56 +571,15 @@ func TestExpirationAndRestoration(t *testing.T) { }) } -func invokeStoreSet( - itest *integration.Test, - storeContractID xdr.Hash, - ledgerEntryData xdr.LedgerEntryData, -) *txnbuild.InvokeHostFunction { - key := ledgerEntryData.MustContractData().Key - val := ledgerEntryData.MustContractData().Val - return &txnbuild.InvokeHostFunction{ - HostFunction: xdr.HostFunction{ - Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract, - InvokeContract: &xdr.InvokeContractArgs{ - ContractAddress: contractIDParam(storeContractID), - FunctionName: "set", - Args: xdr.ScVec{ - key, - val, - }, - }, - }, - SourceAccount: itest.Master().Address(), - } -} - -func invokeStoreRemove( - itest *integration.Test, - storeContractID xdr.Hash, - ledgerKey xdr.LedgerKey, -) *txnbuild.InvokeHostFunction { - return &txnbuild.InvokeHostFunction{ - HostFunction: xdr.HostFunction{ - Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract, - InvokeContract: &xdr.InvokeContractArgs{ - ContractAddress: contractIDParam(storeContractID), - FunctionName: "remove", - Args: xdr.ScVec{ - ledgerKey.MustContractData().Key, - }, - }, - }, - SourceAccount: itest.Master().Address(), - } -} - -func TestContractTransferBetweenAccounts(t *testing.T) { +func CaseContractTransferBetweenAccounts(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -534,6 +607,7 @@ func TestContractTransferBetweenAccounts(t *testing.T) { ) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000")) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -557,10 +631,6 @@ func TestContractTransferBetweenAccounts(t *testing.T) { assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("970")) assertContainsBalance(itest, otherRecipientKp, issuer, code, amount.MustParse("30")) - - fx := getTxEffects(itest, transferTx, asset) - assert.NotEmpty(t, fx) - assertContainsEffect(t, fx, effects.EffectAccountCredited, effects.EffectAccountDebited) assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -573,15 +643,26 @@ func TestContractTransferBetweenAccounts(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) assertEventPayments(itest, transferTx, asset, recipientKp.Address(), otherRecipient.GetAccountID(), "transfer", "30.0000000") + + if !DisabledSoroban { + fx := getTxEffects(itest, transferTx, asset) + assert.NotEmpty(t, fx) + assertContainsEffect(t, fx, effects.EffectAccountCredited, 
effects.EffectAccountDebited) + } else { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) + } } -func TestContractTransferBetweenAccountAndContract(t *testing.T) { +func CaseContractTransferBetweenAccountAndContract(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -627,9 +708,6 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { mint(itest, issuer, asset, "1000", contractAddressParam(recipientContractID)), ) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000")) - assertContainsEffect(t, getTxEffects(itest, mintTx, asset), - effects.EffectContractCredited) - assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -642,6 +720,14 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, mintTx, asset), + effects.EffectContractCredited) + } else { + fx := getTxEffects(itest, mintTx, asset) + require.Len(t, fx, 0) + } + // transfer from account to contract _, transferTx, _ := assertInvokeHostFnSucceeds( itest, @@ -649,8 +735,6 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { transfer(itest, recipientKp.Address(), asset, "30", contractAddressParam(recipientContractID)), ) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("970")) - assertContainsEffect(t, getTxEffects(itest, transferTx, asset), - effects.EffectAccountDebited, effects.EffectContractCredited) assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -664,14 +748,19 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { }) assertEventPayments(itest, transferTx, asset, recipientKp.Address(), strkeyRecipientContractID, "transfer", "30.0000000") + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, transferTx, asset), + effects.EffectAccountDebited, effects.EffectContractCredited) + } else { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) + } // transfer from contract to account _, transferTx, _ = assertInvokeHostFnSucceeds( itest, recipientKp, transferFromContract(itest, recipientKp.Address(), asset, recipientContractID, recipientContractHash, "500", accountAddressParam(recipient.GetAccountID())), ) - assertContainsEffect(t, getTxEffects(itest, transferTx, asset), - effects.EffectContractDebited, effects.EffectAccountCredited) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1470")) assertAssetStats(itest, assetStats{ code: code, @@ -686,6 +775,13 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { }) assertEventPayments(itest, transferTx, asset, strkeyRecipientContractID, recipientKp.Address(), "transfer", "500.0000000") + if DisabledSoroban { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) + return + } + assertContainsEffect(t, getTxEffects(itest, transferTx, asset), + effects.EffectContractDebited, effects.EffectAccountCredited) balanceAmount, _, _ := assertInvokeHostFnSucceeds( itest, itest.Master(), @@ -696,13 +792,15 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) { assert.Equal(itest.CurrentTest(), xdr.Int64(0), 
(*balanceAmount.I128).Hi) } -func TestContractTransferBetweenContracts(t *testing.T) { +func CaseContractTransferBetweenContracts(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -742,8 +840,28 @@ func TestContractTransferBetweenContracts(t *testing.T) { itest.Master(), transferFromContract(itest, issuer, asset, emitterContractID, emitterContractHash, "10", contractAddressParam(recipientContractID)), ) - assertContainsEffect(t, getTxEffects(itest, transferTx, asset), - effects.EffectContractCredited, effects.EffectContractDebited) + + assertAssetStats(itest, assetStats{ + code: code, + issuer: issuer, + numAccounts: 0, + balanceAccounts: 0, + balanceArchivedContracts: big.NewInt(0), + numArchivedContracts: 0, + numContracts: 2, + balanceContracts: big.NewInt(int64(amount.MustParse("1000"))), + contractID: stellarAssetContractID(itest, asset), + }) + assertEventPayments(itest, transferTx, asset, strkeyEmitterContractID, strkeyRecipientContractID, "transfer", "10.0000000") + + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, transferTx, asset), + effects.EffectContractCredited, effects.EffectContractDebited) + } else { + fx := getTxEffects(itest, transferTx, asset) + require.Len(t, fx, 0) + return + } // Check balances of emitter and recipient emitterBalanceAmount, _, _ := assertInvokeHostFnSucceeds( @@ -763,28 +881,17 @@ func TestContractTransferBetweenContracts(t *testing.T) { assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, recipientBalanceAmount.Type) assert.Equal(itest.CurrentTest(), xdr.Uint64(100000000), (*recipientBalanceAmount.I128).Lo) assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*recipientBalanceAmount.I128).Hi) - - assertAssetStats(itest, assetStats{ - code: code, - issuer: issuer, - numAccounts: 0, - balanceAccounts: 0, - balanceArchivedContracts: big.NewInt(0), - numArchivedContracts: 0, - numContracts: 2, - balanceContracts: big.NewInt(int64(amount.MustParse("1000"))), - contractID: stellarAssetContractID(itest, asset), - }) - assertEventPayments(itest, transferTx, asset, strkeyEmitterContractID, strkeyRecipientContractID, "transfer", "10.0000000") } -func TestContractBurnFromAccount(t *testing.T) { +func CaseContractBurnFromAccount(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -830,16 +937,6 @@ func TestContractBurnFromAccount(t *testing.T) { burn(itest, recipientKp.Address(), asset, "500"), ) - fx := getTxEffects(itest, burnTx, asset) - require.Len(t, fx, 1) - assetEffects := assertContainsEffect(t, fx, effects.EffectAccountDebited) - require.GreaterOrEqual(t, len(assetEffects), 1) - burnEffect := assetEffects[0].(effects.AccountDebited) - - assert.Equal(t, issuer, burnEffect.Asset.Issuer) - assert.Equal(t, code, burnEffect.Asset.Code) - assert.Equal(t, "500.0000000", burnEffect.Amount) - assert.Equal(t, recipientKp.Address(), burnEffect.Account) assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -852,15 +949,33 @@ 
func TestContractBurnFromAccount(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) assertEventPayments(itest, burnTx, asset, recipientKp.Address(), "", "burn", "500.0000000") + + if !DisabledSoroban { + fx := getTxEffects(itest, burnTx, asset) + require.Len(t, fx, 1) + assetEffects := assertContainsEffect(t, fx, effects.EffectAccountDebited) + require.GreaterOrEqual(t, len(assetEffects), 1) + burnEffect := assetEffects[0].(effects.AccountDebited) + + assert.Equal(t, issuer, burnEffect.Asset.Issuer) + assert.Equal(t, code, burnEffect.Asset.Code) + assert.Equal(t, "500.0000000", burnEffect.Amount) + assert.Equal(t, recipientKp.Address(), burnEffect.Account) + } else { + fx := getTxEffects(itest, burnTx, asset) + require.Len(t, fx, 0) + } } -func TestContractBurnFromContract(t *testing.T) { +func CaseContractBurnFromContract(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -895,19 +1010,6 @@ func TestContractBurnFromContract(t *testing.T) { burnSelf(itest, issuer, asset, recipientContractID, recipientContractHash, "10"), ) - balanceAmount, _, _ := assertInvokeHostFnSucceeds( - itest, - itest.Master(), - contractBalance(itest, issuer, asset, recipientContractID), - ) - - assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) - assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo) - assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi) - - assertContainsEffect(t, getTxEffects(itest, burnTx, asset), - effects.EffectContractDebited) - assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -920,15 +1022,35 @@ func TestContractBurnFromContract(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) assertEventPayments(itest, burnTx, asset, strkeyRecipientContractID, "", "burn", "10.0000000") + + if !DisabledSoroban { + balanceAmount, _, _ := assertInvokeHostFnSucceeds( + itest, + itest.Master(), + contractBalance(itest, issuer, asset, recipientContractID), + ) + + assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) + assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo) + assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi) + + assertContainsEffect(t, getTxEffects(itest, burnTx, asset), + effects.EffectContractDebited) + } else { + fx := getTxEffects(itest, burnTx, asset) + require.Len(t, fx, 0) + } } -func TestContractClawbackFromAccount(t *testing.T) { +func CaseContractClawbackFromAccount(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -966,6 +1088,7 @@ func TestContractClawbackFromAccount(t *testing.T) { ) assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000")) + assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -983,8 +1106,6 @@ func TestContractClawbackFromAccount(t *testing.T) { itest.Master(), clawback(itest, issuer, asset, "1000", 
accountAddressParam(recipientKp.Address())), ) - - assertContainsEffect(t, getTxEffects(itest, clawTx, asset), effects.EffectAccountDebited) assertContainsBalance(itest, recipientKp, issuer, code, 0) assertAssetStats(itest, assetStats{ code: code, @@ -998,15 +1119,24 @@ func TestContractClawbackFromAccount(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) assertEventPayments(itest, clawTx, asset, recipientKp.Address(), "", "clawback", "1000.0000000") + + if !DisabledSoroban { + assertContainsEffect(t, getTxEffects(itest, clawTx, asset), effects.EffectAccountDebited) + } else { + fx := getTxEffects(itest, clawTx, asset) + require.Len(t, fx, 0) + } } -func TestContractClawbackFromContract(t *testing.T) { +func CaseContractClawbackFromContract(t *testing.T) { if integration.GetCoreMaxSupportedProtocol() < 20 { t.Skip("This test run does not support less than Protocol 20") } itest := integration.NewTest(t, integration.Config{ - ProtocolVersion: 20, + ProtocolVersion: 20, + HorizonEnvironment: map[string]string{ + "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)}, EnableSorobanRPC: true, }) @@ -1044,19 +1174,6 @@ func TestContractClawbackFromContract(t *testing.T) { itest.Master(), clawback(itest, issuer, asset, "10", contractAddressParam(recipientContractID)), ) - - balanceAmount, _, _ := assertInvokeHostFnSucceeds( - itest, - itest.Master(), - contractBalance(itest, issuer, asset, recipientContractID), - ) - assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) - assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo) - assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi) - - assertContainsEffect(t, getTxEffects(itest, clawTx, asset), - effects.EffectContractDebited) - assertAssetStats(itest, assetStats{ code: code, issuer: issuer, @@ -1069,6 +1186,23 @@ func TestContractClawbackFromContract(t *testing.T) { contractID: stellarAssetContractID(itest, asset), }) assertEventPayments(itest, clawTx, asset, strkeyRecipientContractID, "", "clawback", "10.0000000") + + if !DisabledSoroban { + balanceAmount, _, _ := assertInvokeHostFnSucceeds( + itest, + itest.Master(), + contractBalance(itest, issuer, asset, recipientContractID), + ) + assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type) + assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo) + assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi) + + assertContainsEffect(t, getTxEffects(itest, clawTx, asset), + effects.EffectContractDebited) + } else { + fx := getTxEffects(itest, clawTx, asset) + require.Len(t, fx, 0) + } } func assertContainsBalance(itest *integration.Test, acct *keypair.Full, issuer, code string, amt xdr.Int64) { @@ -1179,6 +1313,12 @@ func assertEventPayments(itest *integration.Test, txHash string, asset xdr.Asset invokeHostFn := ops.Embedded.Records[0].(operations.InvokeHostFunction) assert.Equal(itest.CurrentTest(), invokeHostFn.Function, "HostFunctionTypeHostFunctionTypeInvokeContract") + + if DisabledSoroban { + require.Equal(itest.CurrentTest(), 0, len(invokeHostFn.AssetBalanceChanges)) + return + } + require.Equal(itest.CurrentTest(), 1, len(invokeHostFn.AssetBalanceChanges)) assetBalanceChange := invokeHostFn.AssetBalanceChanges[0] assert.Equal(itest.CurrentTest(), assetBalanceChange.Amount, amount) @@ -1400,10 +1540,6 @@ func assertInvokeHostFnSucceeds(itest *integration.Test, signer *keypair.Full, o err = xdr.SafeUnmarshalBase64(clientTx.ResultXdr, 
&txResult) require.NoError(itest.CurrentTest(), err) - var txMetaResult xdr.TransactionMeta - err = xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMetaResult) - require.NoError(itest.CurrentTest(), err) - opResults, ok := txResult.OperationResults() assert.True(itest.CurrentTest(), ok) assert.Equal(itest.CurrentTest(), len(opResults), 1) @@ -1411,9 +1547,18 @@ func assertInvokeHostFnSucceeds(itest *integration.Test, signer *keypair.Full, o assert.True(itest.CurrentTest(), ok) assert.Equal(itest.CurrentTest(), invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess) - returnValue := txMetaResult.MustV3().SorobanMeta.ReturnValue + var returnValue *xdr.ScVal + + if !DisabledSoroban { + var txMetaResult xdr.TransactionMeta + err = xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMetaResult) + require.NoError(itest.CurrentTest(), err) + returnValue = &txMetaResult.MustV3().SorobanMeta.ReturnValue + } else { + verifySorobanMeta(itest.CurrentTest(), clientTx) + } - return &returnValue, clientTx.Hash, &preFlightOp + return returnValue, clientTx.Hash, &preFlightOp } func stellarAssetContractID(itest *integration.Test, asset xdr.Asset) xdr.Hash { From 93f9d706abadbe1594544093a7065665d26bc5cc Mon Sep 17 00:00:00 2001 From: George Date: Wed, 24 Jan 2024 22:18:45 -0800 Subject: [PATCH 13/21] historyarchive: Improve existence checks and performance (#5179) --- historyarchive/archive_cache.go | 21 ++++++++++++++++++++- ingest/verify/main.go | 2 +- services/horizon/internal/ingest/verify.go | 2 +- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/historyarchive/archive_cache.go b/historyarchive/archive_cache.go index a3990428b0..fa279fffd2 100644 --- a/historyarchive/archive_cache.go +++ b/historyarchive/archive_cache.go @@ -113,7 +113,26 @@ func (abc *ArchiveBucketCache) GetFile( } func (abc *ArchiveBucketCache) Exists(filepath string) bool { - return abc.lru.Contains(path.Join(abc.path, filepath)) + localPath := path.Join(abc.path, filepath) + + // First, check if the file exists in the cache. + if abc.lru.Contains(localPath) { + return true + } + + // If it doesn't, it may still exist on the disk which is still a cheaper + // check than going upstream. + // + // Note that this means the cache and disk are out of sync (perhaps due to + // other archives using the same cache location) so we can update it. This + // situation is well-handled by `GetFile`. + _, statErr := os.Stat(localPath) + if statErr == nil || os.IsExist(statErr) { + abc.lru.Add(localPath, struct{}{}) + return true + } + + return false } // Close purges the cache and cleans up the filesystem. 
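A quick aside on the `Exists` change above: the expensive path it avoids is an upstream archive request, so the method now consults the in-memory LRU index first and falls back to a filesystem probe before giving up. A minimal sketch of that stat-based probe, pulled out as a standalone helper (hypothetical name, not part of the patch):

```go
package main

import (
	"fmt"
	"os"
)

// fileExistsOnDisk mirrors the probe used in Exists above: a nil error from
// os.Stat means the file is present, and os.IsExist covers the unusual case
// where Stat returns an error that nonetheless reports the path as existing.
func fileExistsOnDisk(path string) bool {
	_, err := os.Stat(path)
	return err == nil || os.IsExist(err)
}

func main() {
	fmt.Println(fileExistsOnDisk("/etc/hosts"))
}
```

When the probe succeeds, the patch also re-adds the path to the LRU, so a cache index that has drifted out of sync with the disk (for example, because several archives share one cache directory) heals itself on the next lookup.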
diff --git a/ingest/verify/main.go b/ingest/verify/main.go index 4b97ffc2f7..6110448723 100644 --- a/ingest/verify/main.go +++ b/ingest/verify/main.go @@ -66,7 +66,7 @@ func (v *StateVerifier) GetLedgerEntries(count int) ([]xdr.LedgerEntry, error) { } entries := make([]xdr.LedgerEntry, 0, count) - v.currentEntries = make(map[string]xdr.LedgerEntry) + v.currentEntries = make(map[string]xdr.LedgerEntry, count) for count > 0 { entryChange, err := v.stateReader.Read() diff --git a/services/horizon/internal/ingest/verify.go b/services/horizon/internal/ingest/verify.go index bf1ddbe5b5..41b0eb98c5 100644 --- a/services/horizon/internal/ingest/verify.go +++ b/services/horizon/internal/ingest/verify.go @@ -157,8 +157,8 @@ func (s *system) verifyState(verifyAgainstLatestCheckpoint bool) error { } } } - log.WithField("duration", duration).Info("State verification finished") + localLog.WithField("duration", duration).Info("State verification finished") }() localLog.Info("Creating state reader...") From fcfa5a1c8b497e8d7873e8d8121bec32798cce2c Mon Sep 17 00:00:00 2001 From: shawn Date: Fri, 26 Jan 2024 13:08:43 -0800 Subject: [PATCH 14/21] update 2.28.0 changelog, captive core cursor removal notes (#5181) --- services/horizon/CHANGELOG.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index 0899744f94..b832d67404 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -8,7 +8,7 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). ## 2.28.0 ### Fixed -- Ingestion performance timing is improved ([4909](https://github.com/stellar/go/issues/4909)) +- Ingestion performance improvements ([4909](https://github.com/stellar/go/issues/4909)) - Trade aggregation rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168)) - Limited global flags displayed on cli help output ([5077](https://github.com/stellar/go/pull/5077)) - Network usage has been significantly reduced with caching. **Warning:** To support the cache, disk requirements may increase by up to 15GB ([5171](https://github.com/stellar/go/pull/5171)). @@ -24,7 +24,19 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). * API `Operation` model for `InvokeHostFunctionOp` type, will have empty `asset_balance_changes` ### Breaking Changes -- Removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update` , they were related to legacy non-captive core ingestion and are no longer usable. +- Deprecation of legacy, non-captive core ingestion([5158](https://github.com/stellar/go/pull/5158)): + * removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update`, they are no longer usable. + * removed automatic updating of core cursor from ingestion background processing. + * Note for upgrading on existing horizon deployments - Since horizon will no longer maintain advancement of this cursor on core, it may require manual removal of the cursor from the core process that your horizon was using for captive core, otherwise that core process may un-necessarily retain older data in buckets on disk up to the last cursor ledger sequence set by prior horizon release. 
+
+    The captive core process to check and verify presence of cursor usage is determined by the horizon deployment: if `NETWORK` is present, or `STELLAR_CORE_URL` is present, or `CAPTIVE-CORE-HTTP-PORT` is present and set to a non-zero value, or `CAPTIVE-CORE-CONFIG-PATH` is used and the toml has `HTTP_PORT` set to non-zero and `PUBLIC_HTTP_PORT` is not set to false, then it is recommended to perform the following preventative measure on the machine hosting horizon after upgrading to 2.28.0 and restarting the process:
+    ```
+    $ curl http://<core-host:port>/getcursor
+    2. # If there are no cursors reported, done, no need for any action
+    3. # If any horizon cursors exist they need to be dropped by id. By default horizon sets cursor id to "HORIZON" but if it was customised using the --cursor-name flag the id might be different
+    $ curl http://<core-host:port>/dropcursor?id=<cursor-id>
+    ```
+

 ## 2.27.0

From 24a7e9f27184ccc8b0e7a7bbf3839fcdcecc012d Mon Sep 17 00:00:00 2001
From: Shawn Reuland
Date: Fri, 26 Jan 2024 13:26:31 -0800
Subject: [PATCH 15/21] clean up markdown on 2.28.0 release notes

---
 services/horizon/CHANGELOG.md | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index b832d67404..6c3dbf6e9b 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -26,14 +26,16 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
 ### Breaking Changes
 - Deprecation of legacy, non-captive core ingestion([5158](https://github.com/stellar/go/pull/5158)):
   * removed configuration flags `--stellar-core-url-db`, `--cursor-name` `--skip-cursor-update`, they are no longer usable.
-  * removed automatic updating of core cursor from ingestion background processing.
-  * Note for upgrading on existing horizon deployments - Since horizon will no longer maintain advancement of this cursor on core, it may require manual removal of the cursor from the core process that your horizon was using for captive core, otherwise that core process may un-necessarily retain older data in buckets on disk up to the last cursor ledger sequence set by prior horizon release.
+  * removed automatic updating of core cursor from ingestion background processing.
+
+    **Note** for upgrading on existing horizon deployments - Since horizon will no longer maintain advancement of this cursor on core, it may require manual removal of the cursor from the core process that your horizon was using for captive core, otherwise that core process may unnecessarily retain older data in buckets on disk up to the last cursor ledger sequence set by the prior horizon release.

     The captive core process to check and verify presence of cursor usage is determined by the horizon deployment: if `NETWORK` is present, or `STELLAR_CORE_URL` is present, or `CAPTIVE-CORE-HTTP-PORT` is present and set to a non-zero value, or `CAPTIVE-CORE-CONFIG-PATH` is used and the toml has `HTTP_PORT` set to non-zero and `PUBLIC_HTTP_PORT` is not set to false, then it is recommended to perform the following preventative measure on the machine hosting horizon after upgrading to 2.28.0 and restarting the process:
     ```
     $ curl http://<core-host:port>/getcursor
-    2. # If there are no cursors reported, done, no need for any action
-    3. # If any horizon cursors exist they need to be dropped by id. By default horizon sets cursor id to "HORIZON" but if it was customised using the --cursor-name flag the id might be different
+    # If there are no cursors reported, done, no need for any action
+    # If any horizon cursors exist they need to be dropped by id.
+    # By default horizon sets cursor id to "HORIZON" but if it was customized
+    # using the --cursor-name flag the id might be different
     $ curl http://<core-host:port>/dropcursor?id=<cursor-id>
     ```

 ## 2.27.0

From 0fa7d228536045bfc1dc1983d553e58aba3d32af Mon Sep 17 00:00:00 2001
From: Aditya Vyas
Date: Sat, 3 Feb 2024 00:13:37 -0500
Subject: [PATCH 16/21] Fix for transaction submission timeout (#5191)

* Add check for ledger state in txsub
* Add test for badSeq
* Fix failing unit test
* Update system_test.go
* Small changes
* Update main.go
---
 services/horizon/internal/init.go             |  1 +
 ...eq_txsub_test.go => bad_seq_txsub_test.go} | 42 +++++++++
 services/horizon/internal/ledger/main.go      | 13 ++-
 .../horizon/internal/txsub/helpers_test.go    | 32 +++++++
 services/horizon/internal/txsub/system.go     | 11 ++-
 .../horizon/internal/txsub/system_test.go     | 93 +++++++++++++++++++
 6 files changed, 188 insertions(+), 4 deletions(-)
 rename services/horizon/internal/integration/{negative_seq_txsub_test.go => bad_seq_txsub_test.go} (63%)

diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go
index 4078c7ad00..d4b34f9f4d 100644
--- a/services/horizon/internal/init.go
+++ b/services/horizon/internal/init.go
@@ -235,5 +235,6 @@ func initSubmissionSystem(app *App) {
 		DB: func(ctx context.Context) txsub.HorizonDB {
 			return &history.Q{SessionInterface: app.HorizonSession()}
 		},
+		LedgerState: app.ledgerState,
 	}
 }
diff --git a/services/horizon/internal/integration/negative_seq_txsub_test.go b/services/horizon/internal/integration/bad_seq_txsub_test.go
similarity index 63%
rename from services/horizon/internal/integration/negative_seq_txsub_test.go
rename to services/horizon/internal/integration/bad_seq_txsub_test.go
index 787ad0645c..2a5f9d13fe 100644
--- a/services/horizon/internal/integration/negative_seq_txsub_test.go
+++ b/services/horizon/internal/integration/bad_seq_txsub_test.go
@@ -71,3 +71,45 @@ func TestNegativeSequenceTxSubmission(t *testing.T) {
 
 	tt.Equal("tx_bad_seq", codes.TransactionCode)
 }
+
+func TestBadSeqTxSubmission(t *testing.T) {
+	tt := assert.New(t)
+	itest := integration.NewTest(t, integration.Config{})
+	master := itest.Master()
+
+	account := itest.MasterAccount()
+	seqnum, err :=
account.GetSequenceNumber() + tt.NoError(err) + + op2 := txnbuild.Payment{ + Destination: master.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + // Submit a simple payment tx, but with a gapped sequence + // that is intentionally set more than one ahead of current account seq + // this should trigger a tx_bad_seq from core + account = &txnbuild.SimpleAccount{ + AccountID: account.GetAccountID(), + Sequence: seqnum + 10, + } + txParams := txnbuild.TransactionParams{ + SourceAccount: account, + Operations: []txnbuild.Operation{&op2}, + BaseFee: txnbuild.MinBaseFee, + Preconditions: txnbuild.Preconditions{TimeBounds: txnbuild.NewInfiniteTimeout()}, + IncrementSequenceNum: false, + } + tx, err := txnbuild.NewTransaction(txParams) + tt.NoError(err) + tx, err = tx.Sign(integration.StandaloneNetworkPassphrase, master) + tt.NoError(err) + _, err = itest.Client().SubmitTransaction(tx) + tt.Error(err) + clientErr, ok := err.(*horizonclient.Error) + tt.True(ok) + codes, err := clientErr.ResultCodes() + tt.NoError(err) + tt.Equal("tx_bad_seq", codes.TransactionCode) +} diff --git a/services/horizon/internal/ledger/main.go b/services/horizon/internal/ledger/main.go index 1d17e09d67..2101048bad 100644 --- a/services/horizon/internal/ledger/main.go +++ b/services/horizon/internal/ledger/main.go @@ -6,10 +6,9 @@ package ledger import ( + "github.com/prometheus/client_golang/prometheus" "sync" "time" - - "github.com/prometheus/client_golang/prometheus" ) // Status represents a snapshot of both horizon's and stellar-core's view of the @@ -31,7 +30,7 @@ type HorizonStatus struct { } // State is an in-memory data structure which holds a snapshot of both -// horizon's and stellar-core's view of the the network +// horizon's and stellar-core's view of the network type State struct { sync.RWMutex current Status @@ -44,6 +43,14 @@ type State struct { } } +type StateInterface interface { + CurrentStatus() Status + SetStatus(next Status) + SetCoreStatus(next CoreStatus) + SetHorizonStatus(next HorizonStatus) + RegisterMetrics(registry *prometheus.Registry) +} + // CurrentStatus returns the cached snapshot of ledger state func (c *State) CurrentStatus() Status { c.RLock() diff --git a/services/horizon/internal/txsub/helpers_test.go b/services/horizon/internal/txsub/helpers_test.go index 0e5a63bca7..3c4cb6cb0b 100644 --- a/services/horizon/internal/txsub/helpers_test.go +++ b/services/horizon/internal/txsub/helpers_test.go @@ -9,6 +9,8 @@ package txsub import ( "context" "database/sql" + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/services/horizon/internal/ledger" "github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stretchr/testify/mock" @@ -72,3 +74,33 @@ func (m *mockDBQ) TransactionByHash(ctx context.Context, dest interface{}, hash args := m.Called(ctx, dest, hash) return args.Error(0) } + +type MockLedgerState struct { + mock.Mock +} + +// CurrentStatus mocks the CurrentStatus method. +func (m *MockLedgerState) CurrentStatus() ledger.Status { + args := m.Called() + return args.Get(0).(ledger.Status) +} + +// SetStatus mocks the SetStatus method. +func (m *MockLedgerState) SetStatus(next ledger.Status) { + m.Called(next) +} + +// SetCoreStatus mocks the SetCoreStatus method. +func (m *MockLedgerState) SetCoreStatus(next ledger.CoreStatus) { + m.Called(next) +} + +// SetHorizonStatus mocks the SetHorizonStatus method. 
+func (m *MockLedgerState) SetHorizonStatus(next ledger.HorizonStatus) { + m.Called(next) +} + +// RegisterMetrics mocks the RegisterMetrics method. +func (m *MockLedgerState) RegisterMetrics(registry *prometheus.Registry) { + m.Called(registry) +} diff --git a/services/horizon/internal/txsub/system.go b/services/horizon/internal/txsub/system.go index 189f1619ff..31038135f3 100644 --- a/services/horizon/internal/txsub/system.go +++ b/services/horizon/internal/txsub/system.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "github.com/stellar/go/services/horizon/internal/ledger" "sync" "time" @@ -40,6 +41,7 @@ type System struct { Submitter Submitter SubmissionTimeout time.Duration Log *log.Entry + LedgerState ledger.StateInterface Metrics struct { // SubmissionDuration exposes timing metrics about the rate and latency of @@ -190,7 +192,7 @@ func (sys *System) waitUntilAccountSequence(ctx context.Context, db HorizonDB, s WithField("sourceAddress", sourceAddress). Warn("missing sequence number for account") } - if num >= seq { + if num >= seq || sys.isSyncedUp() { return nil } } @@ -204,6 +206,13 @@ func (sys *System) waitUntilAccountSequence(ctx context.Context, db HorizonDB, s } } +// isSyncedUp Check if Horizon and Core have synced up: If yes, then no need to wait for account sequence +// and send txBAD_SEQ right away. +func (sys *System) isSyncedUp() bool { + currentStatus := sys.LedgerState.CurrentStatus() + return int(currentStatus.CoreLatest) <= int(currentStatus.HistoryLatest) +} + func (sys *System) deriveTxSubError(ctx context.Context) error { if ctx.Err() == context.Canceled { return ErrCanceled diff --git a/services/horizon/internal/txsub/system_test.go b/services/horizon/internal/txsub/system_test.go index 816cc28e66..b4a36fb522 100644 --- a/services/horizon/internal/txsub/system_test.go +++ b/services/horizon/internal/txsub/system_test.go @@ -6,6 +6,7 @@ import ( "context" "database/sql" "errors" + "github.com/stellar/go/services/horizon/internal/ledger" "testing" "time" @@ -155,6 +156,17 @@ func (suite *SystemTestSuite) TestTimeoutDuringSequenceLoop() { suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil) + mockLedgerState := &MockLedgerState{} + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 3, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Twice() + suite.system.LedgerState = mockLedgerState + r := <-suite.system.Submit( suite.ctx, suite.successTx.Transaction.TxEnvelope, @@ -187,6 +199,17 @@ func (suite *SystemTestSuite) TestClientDisconnectedDuringSequenceLoop() { suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil) + mockLedgerState := &MockLedgerState{} + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 3, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Once() + suite.system.LedgerState = mockLedgerState + r := <-suite.system.Submit( suite.ctx, suite.successTx.Transaction.TxEnvelope, @@ -253,6 +276,17 @@ func (suite *SystemTestSuite) TestSubmit_BadSeq() { }). 
Return(nil).Once() + mockLedgerState := &MockLedgerState{} + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 3, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Twice() + suite.system.LedgerState = mockLedgerState + r := <-suite.system.Submit( suite.ctx, suite.successTx.Transaction.TxEnvelope, @@ -281,6 +315,64 @@ func (suite *SystemTestSuite) TestSubmit_BadSeqNotFound() { Return(map[string]uint64{suite.unmuxedSource.Address(): 1}, nil). Once() + mockLedgerState := &MockLedgerState{} + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 3, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Times(3) + suite.system.LedgerState = mockLedgerState + + // set poll interval to 1ms so we don't need to wait 3 seconds for the test to complete + suite.system.Init() + suite.system.accountSeqPollInterval = time.Millisecond + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.NotNil(suite.T(), r.Err) + assert.True(suite.T(), suite.submitter.WasSubmittedTo) +} + +// If error is bad_seq and horizon and core are in sync, then return error +func (suite *SystemTestSuite) TestSubmit_BadSeqErrorWhenInSync() { + suite.submitter.R = suite.badSeq + suite.db.On("PreFilteredTransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Twice() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Twice() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Twice() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Twice() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Twice() + + mockLedgerState := &MockLedgerState{} + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 3, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Once() + mockLedgerState.On("CurrentStatus").Return(ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 1, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 1, + }, + }).Once() + suite.system.LedgerState = mockLedgerState + // set poll interval to 1ms so we don't need to wait 3 seconds for the test to complete suite.system.Init() suite.system.accountSeqPollInterval = time.Millisecond @@ -293,6 +385,7 @@ func (suite *SystemTestSuite) TestSubmit_BadSeqNotFound() { ) assert.NotNil(suite.T(), r.Err) + assert.Equal(suite.T(), r.Err.Error(), "tx failed: AAAAAAAAAAD////7AAAAAA==") // decodes to txBadSeq assert.True(suite.T(), suite.submitter.WasSubmittedTo) } From 433831fb25cce9a1a64f5125f2c132217adf4fca Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Sun, 4 Feb 2024 15:54:26 -0800 Subject: [PATCH 17/21] updated changelog notes --- services/horizon/CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index 6c3dbf6e9b..9523d2f039 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -5,6 +5,12 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). 
## Unreleased +## 2.28.1 + +### Fixed +- Submitting transaction with a future gapped sequence number when horizon ingestion is lagging behind core, may result in delayed 60s timeout response ([5191](https://github.com/stellar/go/pull/5191)) + + ## 2.28.0 ### Fixed From 098e686ee4382425371895820559f03a2c583e8c Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Sun, 4 Feb 2024 18:11:01 -0800 Subject: [PATCH 18/21] better description of txsub issue in notes --- services/horizon/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md index 9523d2f039..193b731298 100644 --- a/services/horizon/CHANGELOG.md +++ b/services/horizon/CHANGELOG.md @@ -8,7 +8,7 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). ## 2.28.1 ### Fixed -- Submitting transaction with a future gapped sequence number when horizon ingestion is lagging behind core, may result in delayed 60s timeout response ([5191](https://github.com/stellar/go/pull/5191)) +- Submitting transaction with a future gapped sequence number greater than 1 past current source account sequence, may result in delayed 60s timeout response, rather than expected HTTP 400 error response with `result_codes: {transaction: "tx_bad_seq"}` ([5191](https://github.com/stellar/go/pull/5191)) ## 2.28.0 From 4ef82e99441425466cda645457a684209f29bc85 Mon Sep 17 00:00:00 2001 From: urvisavla Date: Wed, 7 Feb 2024 16:47:23 -0800 Subject: [PATCH 19/21] services/horizon: Fix claimable balance query (#5200) * services/horizon: Fix claimable balance query * Fix Soroban RPC image incompatibility --- .github/workflows/horizon.yml | 2 +- .../db2/history/claimable_balances.go | 8 +- .../db2/history/claimable_balances_test.go | 182 ++++++++++++++++++ 3 files changed, 187 insertions(+), 5 deletions(-) diff --git a/.github/workflows/horizon.yml b/.github/workflows/horizon.yml index e7bf35cb34..c393e8d240 100644 --- a/.github/workflows/horizon.yml +++ b/.github/workflows/horizon.yml @@ -35,7 +35,7 @@ jobs: HORIZON_INTEGRATION_TESTS_CORE_MAX_SUPPORTED_PROTOCOL: ${{ matrix.protocol-version }} PROTOCOL_20_CORE_DEBIAN_PKG_VERSION: 20.1.0-1656.114b833e7.focal PROTOCOL_20_CORE_DOCKER_IMG: stellar/unsafe-stellar-core:20.1.0-1656.114b833e7.focal - PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.2.0 + PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.2.0@sha256:2b1237a6ca43ea5768031d9ab442e4895d0ce5437b38cbfee4c8ab9237f231ae PROTOCOL_19_CORE_DEBIAN_PKG_VERSION: 19.14.0-1500.5664eff4e.focal PROTOCOL_19_CORE_DOCKER_IMG: stellar/stellar-core:19.14.0-1500.5664eff4e.focal PGHOST: localhost diff --git a/services/horizon/internal/db2/history/claimable_balances.go b/services/horizon/internal/db2/history/claimable_balances.go index 5490bef11c..d45780a4c0 100644 --- a/services/horizon/internal/db2/history/claimable_balances.go +++ b/services/horizon/internal/db2/history/claimable_balances.go @@ -67,17 +67,17 @@ func applyClaimableBalancesQueriesCursor(sql sq.SelectBuilder, lCursor int64, rC case db2.OrderAscending: if hasPagedLimit { sql = sql. - Where(sq.Expr("(last_modified_ledger, id) > (?, ?)", lCursor, rCursor)) + Where(sq.Expr("(cb.last_modified_ledger, cb.id) > (?, ?)", lCursor, rCursor)) } - sql = sql.OrderBy("last_modified_ledger asc, id asc") + sql = sql.OrderBy("cb.last_modified_ledger asc, cb.id asc") case db2.OrderDescending: if hasPagedLimit { sql = sql. 
- Where(sq.Expr("(last_modified_ledger, id) < (?, ?)", lCursor, rCursor)) + Where(sq.Expr("(cb.last_modified_ledger, cb.id) < (?, ?)", lCursor, rCursor)) } - sql = sql.OrderBy("last_modified_ledger desc, id desc") + sql = sql.OrderBy("cb.last_modified_ledger desc, cb.id desc") default: return sql, errors.Errorf("invalid order: %s", order) } diff --git a/services/horizon/internal/db2/history/claimable_balances_test.go b/services/horizon/internal/db2/history/claimable_balances_test.go index ca32975c62..769ab3bc13 100644 --- a/services/horizon/internal/db2/history/claimable_balances_test.go +++ b/services/horizon/internal/db2/history/claimable_balances_test.go @@ -219,6 +219,188 @@ func TestFindClaimableBalancesByDestination(t *testing.T) { tt.Assert.Len(cbs, 1) } +func TestFindClaimableBalancesByCursor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + tt.Assert.NoError(q.BeginTx(tt.Ctx, &sql.TxOptions{})) + defer func() { + _ = q.Rollback() + }() + + balanceInsertBuilder := q.NewClaimableBalanceBatchInsertBuilder() + claimantsInsertBuilder := q.NewClaimableBalanceClaimantBatchInsertBuilder() + + dest1 := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + dest2 := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + + sponsor1 := "GA25GQLHJU3LPEJXEIAXK23AWEA5GWDUGRSHTQHDFT6HXHVMRULSQJUJ" + sponsor2 := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + + asset := xdr.MustNewCreditAsset("USD", dest1) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: dest1, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + Sponsor: null.StringFrom(sponsor1), + } + + tt.Assert.NoError(balanceInsertBuilder.Add(cBalance)) + tt.Assert.NoError(insertClaimants(claimantsInsertBuilder, cBalance)) + + balanceID = xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{3, 2, 1}, + } + id, err = xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance = ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: dest1, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + { + Destination: dest2, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 300, + Amount: 10, + Sponsor: null.StringFrom(sponsor2), + } + + tt.Assert.NoError(balanceInsertBuilder.Add(cBalance)) + tt.Assert.NoError(insertClaimants(claimantsInsertBuilder, cBalance)) + + tt.Assert.NoError(claimantsInsertBuilder.Exec(tt.Ctx)) + tt.Assert.NoError(balanceInsertBuilder.Exec(tt.Ctx)) + + query := ClaimableBalancesQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + } + + cbs, err := q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 2) + + order := "" // default is "asc" + // this validates the cb query with claimant and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + 
tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // this validates the cb query with claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // this validates the cb query with no claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = nil + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + order = "desc" + // claimant and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 301, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 301, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // no claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 301, cbs[0].BalanceID), false, order, 10) + query.Claimant = nil + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + order = "asc" + // claimant and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = xdr.MustAddressPtr(dest1) + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) + + // no claimant, asset, sponsor and cb.id/ledger cursor parameters + query.PageQuery = db2.MustPageQuery(fmt.Sprintf("%v-%s", 150, cbs[0].BalanceID), false, order, 10) + query.Claimant = nil + query.Asset = &asset + query.Sponsor = xdr.MustAddressPtr(sponsor2) + + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) +} + func 
insertClaimants(claimantsInsertBuilder ClaimableBalanceClaimantBatchInsertBuilder, cBalance ClaimableBalance) error {
 	for _, claimant := range cBalance.Claimants {
 		claimant := ClaimableBalanceClaimant{

From 02cd78480bf7962b843fc113a25cea0d6b0afde9 Mon Sep 17 00:00:00 2001
From: George
Date: Thu, 8 Feb 2024 12:02:04 -0800
Subject: [PATCH 20/21] services/horizon: Add cache toggle and use library for
 on-disk caching (#5197)

* Add a `--history-archive-caching` flag (default=true) to toggle behavior
* Refactor to use a library: fscache
* Hook new metric into prometheus
* Add parallel read test to stress cache
* Add tests for deadlocking and other misc. scenarios
---
 go.mod                                        |   5 +-
 go.sum                                        |   6 +
 historyarchive/archive.go                     | 128 ++++++++--
 historyarchive/archive_cache.go               | 225 ------------------
 historyarchive/archive_test.go                | 133 ++++++++---
 historyarchive/failing_mock_archive.go        |  76 ++++++
 historyarchive/mocks.go                       |   5 +
 historyarchive/stats.go                       |   9 +
 historyarchive/xdrstream.go                   |   2 +-
 services/horizon/cmd/db.go                    |   1 +
 services/horizon/cmd/ingest.go                |   3 +
 services/horizon/internal/config.go           |   1 +
 services/horizon/internal/flags.go            |  17 +-
 services/horizon/internal/ingest/fsm.go       |   5 +
 services/horizon/internal/ingest/main.go      |  17 +-
 .../internal/ingest/resume_state_test.go      |   3 +
 services/horizon/internal/init.go             |   1 +
 17 files changed, 339 insertions(+), 298 deletions(-)
 delete mode 100644 historyarchive/archive_cache.go
 create mode 100644 historyarchive/failing_mock_archive.go

diff --git a/go.mod b/go.mod
index a7fd2ef71c..17627b7674 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
 	github.com/aws/aws-sdk-go v1.45.26
 	github.com/creachadair/jrpc2 v1.1.0
+	github.com/djherbis/fscache v0.10.1
 	github.com/elazarl/go-bindata-assetfs v1.0.1
 	github.com/getsentry/raven-go v0.2.0
 	github.com/go-chi/chi v4.1.2+incompatible
@@ -84,6 +85,8 @@ require (
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+	gopkg.in/djherbis/atime.v1 v1.0.0 // indirect
+	gopkg.in/djherbis/stream.v1 v1.3.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 )

@@ -102,7 +105,7 @@ require (
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 // indirect
 	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
-	github.com/hashicorp/golang-lru v1.0.2
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/imkira/go-interpol v1.1.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
diff --git a/go.sum b/go.sum
index bb1175e120..d2358499b2 100644
--- a/go.sum
+++ b/go.sum
@@ -104,6 +104,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/djherbis/fscache v0.10.1 h1:hDv+RGyvD+UDKyRYuLoVNbuRTnf2SrA2K3VyR1br9lk=
+github.com/djherbis/fscache v0.10.1/go.mod h1:yyPYtkNnnPXsW+81lAcQS6yab3G2CRfnPLotBvtbf0c=
 github.com/eapache/go-resiliency v1.2.0/go.mod
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -801,6 +803,10 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/djherbis/atime.v1 v1.0.0 h1:eMRqB/JrLKocla2PBPKgQYg/p5UG4L6AUAs92aP7F60= +gopkg.in/djherbis/atime.v1 v1.0.0/go.mod h1:hQIUStKmJfvf7xdh/wtK84qe+DsTV5LnA9lzxxtPpJ8= +gopkg.in/djherbis/stream.v1 v1.3.1 h1:uGfmsOY1qqMjQQphhRBSGLyA9qumJ56exkRu9ASTjCw= +gopkg.in/djherbis/stream.v1 v1.3.1/go.mod h1:aEV8CBVRmSpLamVJfM903Npic1IKmb2qS30VAZ+sssg= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw= diff --git a/historyarchive/archive.go b/historyarchive/archive.go index d53ab37071..bbd030ffce 100644 --- a/historyarchive/archive.go +++ b/historyarchive/archive.go @@ -11,12 +11,15 @@ import ( "fmt" "io" "net/url" + "os" "path" "regexp" "strconv" "strings" "sync" + "time" + fscache "github.com/djherbis/fscache" log "github.com/sirupsen/logrus" "github.com/stellar/go/support/errors" @@ -50,8 +53,8 @@ type ConnectOptions struct { CheckpointFrequency uint32 // UserAgent is the value of `User-Agent` header. Applicable only for HTTP client. UserAgent string - // CacheConfig controls how/if bucket files are cached on the disk. - CacheConfig CacheOptions + // CachePath controls where/if bucket files are cached on the disk. + CachePath string } type Ledger struct { @@ -117,8 +120,16 @@ type Archive struct { checkpointManager CheckpointManager backend ArchiveBackend - cache *ArchiveBucketCache stats archiveStats + + cache *archiveBucketCache +} + +type archiveBucketCache struct { + fscache.Cache + + path string + sizes sync.Map } func (arch *Archive) GetStats() []ArchiveStats { @@ -395,23 +406,79 @@ func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) { } func (a *Archive) cachedGet(pth string) (io.ReadCloser, error) { - if a.cache != nil { - rdr, foundInCache, err := a.cache.GetFile(pth, a.backend) - if !foundInCache { - a.stats.incrementDownloads() - } else { - a.stats.incrementCacheHits() - } - if err == nil { - return rdr, nil + if a.cache == nil { + a.stats.incrementDownloads() + return a.backend.GetFile(pth) + } + + L := log.WithField("path", pth).WithField("cache", a.cache.path) + + rdr, wrtr, err := a.cache.Get(pth) + if err != nil { + L.WithError(err). + WithField("remove", a.cache.Remove(pth)). + Warn("On-disk cache retrieval failed") + a.stats.incrementDownloads() + return a.backend.GetFile(pth) + } + + // If a NEW key is being retrieved, it returns a writer to which + // you're expected to write your upstream as well as a reader that + // will read directly from it. 
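+	// (fscache background: Get returns a reader plus a writer; the writer is
+	// non-nil only on a cache miss, and concurrent readers of the same key
+	// stream from the partially written file until that writer is closed.)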
+ if wrtr != nil { + log.WithField("path", pth).Info("Caching file...") + a.stats.incrementDownloads() + upstreamReader, err := a.backend.GetFile(pth) + if err != nil { + writeErr := wrtr.Close() + readErr := rdr.Close() + removeErr := a.cache.Remove(pth) + // Execution order isn't guaranteed w/in a function call expression + // so we close them with explicit order first. + L.WithError(err).WithFields(log.Fields{ + "write-close": writeErr, + "read-close": readErr, + "cache-rm": removeErr, + }).Warn("Download failed, purging from cache") + return nil, err } - // If there's an error, retry with the uncached backend. - a.cache.Evict(pth) + // Start a goroutine to slurp up the upstream and feed + // it directly to the cache. + go func() { + written, err := io.Copy(wrtr, upstreamReader) + writeErr := wrtr.Close() + readErr := upstreamReader.Close() + fields := log.Fields{ + "wr-close": writeErr, + "rd-close": readErr, + } + + if err != nil { + L.WithFields(fields).WithError(err). + Warn("Failed to download and cache file") + + // Removal must happen *after* handles close. + if removalErr := a.cache.Remove(pth); removalErr != nil { + L.WithError(removalErr).Warn("Removing cached file failed") + } + } else { + L.WithFields(fields).Infof("Cached %dKiB file", written/1024) + + // Track how much bandwidth we've saved from caching by saving + // the size of the file we just downloaded. + a.cache.sizes.Store(pth, written) + } + }() + } else { + // Best-effort check to track bandwidth metrics + if written, found := a.cache.sizes.Load(pth); found { + a.stats.incrementCacheBandwidth(written.(int64)) + } + a.stats.incrementCacheHits() } - a.stats.incrementDownloads() - return a.backend.GetFile(pth) + return rdr, nil } func (a *Archive) cachedExists(pth string) (bool, error) { @@ -468,6 +535,8 @@ func Connect(u string, opts ConnectOptions) (*Archive, error) { arch.backend = makeHttpBackend(parsed, opts) } else if parsed.Scheme == "mock" { arch.backend = makeMockBackend(opts) + } else if parsed.Scheme == "fmock" { + arch.backend = makeFailingMockBackend(opts) } else { err = errors.New("unknown URL scheme: '" + parsed.Scheme + "'") } @@ -476,13 +545,30 @@ func Connect(u string, opts ConnectOptions) (*Archive, error) { return &arch, err } - if opts.CacheConfig.Cache { - cache, innerErr := MakeArchiveBucketCache(opts.CacheConfig) - if innerErr != nil { - return &arch, innerErr + if opts.CachePath != "" { + // Set up a <= ~10GiB LRU cache for history archives files + haunter := fscache.NewLRUHaunterStrategy( + fscache.NewLRUHaunter(0, 10<<30, time.Minute /* frequency check */), + ) + + // Wipe any existing cache on startup + os.RemoveAll(opts.CachePath) + fs, err := fscache.NewFs(opts.CachePath, 0755 /* drwxr-xr-x */) + + if err != nil { + return &arch, errors.Wrapf(err, + "creating cache at '%s' with mode 0755 failed", + opts.CachePath) + } + + cache, err := fscache.NewCacheWithHaunter(fs, haunter) + if err != nil { + return &arch, errors.Wrapf(err, + "creating cache at '%s' failed", + opts.CachePath) } - arch.cache = cache + arch.cache = &archiveBucketCache{cache, opts.CachePath, sync.Map{}} } arch.stats = archiveStats{backendName: parsed.String()} diff --git a/historyarchive/archive_cache.go b/historyarchive/archive_cache.go deleted file mode 100644 index fa279fffd2..0000000000 --- a/historyarchive/archive_cache.go +++ /dev/null @@ -1,225 +0,0 @@ -package historyarchive - -import ( - "io" - "os" - "path" - - lru "github.com/hashicorp/golang-lru" - log "github.com/stellar/go/support/log" -) - -type 
CacheOptions struct { - Cache bool - - Path string - MaxFiles uint - Log *log.Entry -} - -type ArchiveBucketCache struct { - path string - lru *lru.Cache - log *log.Entry -} - -// MakeArchiveBucketCache creates a cache on the disk at the given path that -// acts as an LRU cache, mimicking a particular upstream. -func MakeArchiveBucketCache(opts CacheOptions) (*ArchiveBucketCache, error) { - log_ := opts.Log - if opts.Log == nil { - log_ = log.WithField("subservice", "fs-cache") - } - log_ = log_. - WithField("path", opts.Path). - WithField("cap", opts.MaxFiles) - - if _, err := os.Stat(opts.Path); err == nil || os.IsExist(err) { - log_.Warnf("Cache directory already exists, removing") - os.RemoveAll(opts.Path) - } - - backend := &ArchiveBucketCache{ - path: opts.Path, - log: log_, - } - - cache, err := lru.NewWithEvict(int(opts.MaxFiles), backend.onEviction) - if err != nil { - return &ArchiveBucketCache{}, err - } - backend.lru = cache - - log_.Info("Bucket cache initialized") - return backend, nil -} - -// GetFile retrieves the file contents from the local cache if present. -// Otherwise, it returns the same result as the upstream, adding that result -// into the local cache if possible. It returns a 3-tuple of a reader (which may -// be nil on an error), an indication of whether or not it was *found* in the -// cache, and any error. -func (abc *ArchiveBucketCache) GetFile( - filepath string, - upstream ArchiveBackend, -) (io.ReadCloser, bool, error) { - L := abc.log.WithField("key", filepath) - localPath := path.Join(abc.path, filepath) - - // If the lockfile exists, we should defer to the remote source but *not* - // update the cache, as it means there's an in-progress sync of the same - // file. - _, statErr := os.Stat(NameLockfile(localPath)) - if statErr == nil || os.IsExist(statErr) { - L.Info("Incomplete file in on-disk cache: deferring") - reader, err := upstream.GetFile(filepath) - return reader, false, err - } else if _, ok := abc.lru.Get(localPath); !ok { - L.Info("File does not exist in the cache: downloading") - - // Since it's not on-disk, pull it from the remote backend, shove it - // into the cache, and write it to disk. - remote, err := upstream.GetFile(filepath) - if err != nil { - return remote, false, err - } - - local, err := abc.createLocal(filepath) - if err != nil { - // If there's some local FS error, we can still continue with the - // remote version, so just log it and continue. - L.WithError(err).Warn("Creating cache file failed") - return remote, false, nil - } - - return teeReadCloser(remote, local, func() error { - L.Debug("Download complete: removing lockfile") - return os.Remove(NameLockfile(localPath)) - }), false, nil - } - - L.Info("Found file in cache") - // The cache claims it exists, so just give it a read and send it. - local, err := os.Open(localPath) - if err != nil { - // Uh-oh, the cache and the disk are not in sync somehow? Let's evict - // this value and try again (recurse) w/ the remote version. - L.WithError(err).Warn("Opening cached file failed") - abc.lru.Remove(localPath) - return abc.GetFile(filepath, upstream) - } - - return local, true, nil -} - -func (abc *ArchiveBucketCache) Exists(filepath string) bool { - localPath := path.Join(abc.path, filepath) - - // First, check if the file exists in the cache. - if abc.lru.Contains(localPath) { - return true - } - - // If it doesn't, it may still exist on the disk which is still a cheaper - // check than going upstream. 
- // - // Note that this means the cache and disk are out of sync (perhaps due to - // other archives using the same cache location) so we can update it. This - // situation is well-handled by `GetFile`. - _, statErr := os.Stat(localPath) - if statErr == nil || os.IsExist(statErr) { - abc.lru.Add(localPath, struct{}{}) - return true - } - - return false -} - -// Close purges the cache and cleans up the filesystem. -func (abc *ArchiveBucketCache) Close() error { - abc.lru.Purge() - return os.RemoveAll(abc.path) -} - -// Evict removes a file from the cache and the filesystem. -func (abc *ArchiveBucketCache) Evict(filepath string) { - log.WithField("key", filepath).Info("Evicting file from the disk") - abc.lru.Remove(path.Join(abc.path, filepath)) -} - -func (abc *ArchiveBucketCache) onEviction(key, value interface{}) { - path := key.(string) - os.Remove(NameLockfile(path)) // just in case - if err := os.Remove(path); err != nil { // best effort removal - abc.log.WithError(err). - WithField("key", path). - Warn("Removal failed after cache eviction") - } -} - -func (abc *ArchiveBucketCache) createLocal(filepath string) (*os.File, error) { - localPath := path.Join(abc.path, filepath) - if err := os.MkdirAll(path.Dir(localPath), 0755 /* drwxr-xr-x */); err != nil { - return nil, err - } - - local, err := os.Create(localPath) /* mode -rw-rw-rw- */ - if err != nil { - return nil, err - } - _, err = os.Create(NameLockfile(localPath)) - if err != nil { - return nil, err - } - - abc.lru.Add(localPath, struct{}{}) // just use the cache as an array - return local, nil -} - -func NameLockfile(file string) string { - return file + ".lock" -} - -// The below is a helper interface so that we can use io.TeeReader to write -// data locally immediately as we read it remotely. 
- -type trc struct { - io.Reader - close func() error - closed bool // prevents a double-close -} - -func (t trc) Close() error { - if t.closed { - return nil - } - - return t.close() -} - -func teeReadCloser(r io.ReadCloser, w io.WriteCloser, onClose func() error) io.ReadCloser { - closer := trc{ - Reader: io.TeeReader(r, w), - closed: false, - } - closer.close = func() error { - if closer.closed { - return nil - } - - // Always run all closers, but return the first error - err1 := r.Close() - err2 := w.Close() - err3 := onClose() - - closer.closed = true - if err1 != nil { - return err1 - } else if err2 != nil { - return err2 - } - return err3 - } - - return closer -} diff --git a/historyarchive/archive_test.go b/historyarchive/archive_test.go index 4518315f3e..5f4fc00f4c 100644 --- a/historyarchive/archive_test.go +++ b/historyarchive/archive_test.go @@ -18,12 +18,17 @@ import ( "os" "path/filepath" "strings" + "sync" "testing" + "time" "github.com/stellar/go/xdr" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +var cachePath = filepath.Join(os.TempDir(), "history-archive-test-cache") + func GetTestS3Archive() *Archive { mx := big.NewInt(0xffffffff) r, e := rand.Int(rand.Reader, mx) @@ -44,11 +49,8 @@ func GetTestS3Archive() *Archive { func GetTestMockArchive() *Archive { return MustConnect("mock://test", ConnectOptions{ CheckpointFrequency: 64, - CacheConfig: CacheOptions{ - Cache: true, - Path: filepath.Join(os.TempDir(), "history-archive-test-cache"), - MaxFiles: 5, - }}) + CachePath: cachePath, + }) } var tmpdirs []string @@ -554,7 +556,95 @@ func TestGetLedgers(t *testing.T) { assert.Equal(t, uint32(1), archive.GetStats()[0].GetRequests()) assert.Equal(t, uint32(0), archive.GetStats()[0].GetDownloads()) assert.EqualError(t, err, "checkpoint 1023 is not published") + ledgerHeaders, transactions, results := makeFakeArchive(t, archive) + + stats := archive.GetStats()[0] + ledgers, err := archive.GetLedgers(1000, 1002) + + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + // it started at 1, incurred 6 requests total: 3 queries + 3 downloads + assert.EqualValues(t, 7, stats.GetRequests()) + // started 0, incurred 3 file downloads + assert.EqualValues(t, 3, stats.GetDownloads()) + assert.EqualValues(t, 0, stats.GetCacheHits()) + for i, seq := range []uint32{1000, 1001, 1002} { + ledger := ledgers[seq] + assertXdrEquals(t, ledgerHeaders[i], ledger.Header) + assertXdrEquals(t, transactions[i], ledger.Transaction) + assertXdrEquals(t, results[i], ledger.TransactionResult) + } + + // Repeat the same check but ensure the cache was used + ledgers, err = archive.GetLedgers(1000, 1002) // all cached + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + // downloads should not change because of the cache + assert.EqualValues(t, 3, stats.GetDownloads()) + // but requests increase because of 3 fetches to categories + assert.EqualValues(t, 10, stats.GetRequests()) + assert.EqualValues(t, 3, stats.GetCacheHits()) + for i, seq := range []uint32{1000, 1001, 1002} { + ledger := ledgers[seq] + assertXdrEquals(t, ledgerHeaders[i], ledger.Header) + assertXdrEquals(t, transactions[i], ledger.Transaction) + assertXdrEquals(t, results[i], ledger.TransactionResult) + } + + // remove the cached files without informing it and ensure it fills up again + require.NoError(t, os.RemoveAll(cachePath)) + ledgers, err = archive.GetLedgers(1000, 1002) // uncached, refetch + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + + // downloads should increase again + assert.EqualValues(t, 
6, stats.GetDownloads()) + assert.EqualValues(t, 3, stats.GetCacheHits()) +} + +func TestStressfulGetLedgers(t *testing.T) { + archive := GetTestMockArchive() + ledgerHeaders, transactions, results := makeFakeArchive(t, archive) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + + go func() { + time.Sleep(time.Millisecond) // encourage interleaved execution + ledgers, err := archive.GetLedgers(1000, 1002) + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + for i, seq := range []uint32{1000, 1001, 1002} { + ledger := ledgers[seq] + assertXdrEquals(t, ledgerHeaders[i], ledger.Header) + assertXdrEquals(t, transactions[i], ledger.Transaction) + assertXdrEquals(t, results[i], ledger.TransactionResult) + } + + wg.Done() + }() + } + + require.Eventually(t, func() bool { wg.Wait(); return true }, time.Minute, time.Second) +} + +func TestCacheDeadlocks(t *testing.T) { + archive := MustConnect("fmock://test", ConnectOptions{ + CheckpointFrequency: 64, + CachePath: cachePath, + }) + makeFakeArchive(t, archive) + _, err := archive.GetLedgers(1000, 1002) + require.Error(t, err) +} + +func makeFakeArchive(t *testing.T, archive *Archive) ( + []xdr.LedgerHeaderHistoryEntry, + []xdr.TransactionHistoryEntry, + []xdr.TransactionHistoryResultEntry, +) { ledgerHeaders := []xdr.LedgerHeaderHistoryEntry{ { Hash: xdr.Hash{1}, @@ -637,36 +727,5 @@ func TestGetLedgers(t *testing.T) { []xdrEntry{results[0], results[1], results[2]}, ) - stats := archive.GetStats()[0] - ledgers, err := archive.GetLedgers(1000, 1002) - - assert.NoError(t, err) - assert.Len(t, ledgers, 3) - // it started at 1, incurred 6 requests total, 3 queries, 3 downloads - assert.EqualValues(t, 7, stats.GetRequests()) - // started 0, incurred 3 file downloads - assert.EqualValues(t, 3, stats.GetDownloads()) - for i, seq := range []uint32{1000, 1001, 1002} { - ledger := ledgers[seq] - assertXdrEquals(t, ledgerHeaders[i], ledger.Header) - assertXdrEquals(t, transactions[i], ledger.Transaction) - assertXdrEquals(t, results[i], ledger.TransactionResult) - } - - // Repeat the same check but ensure the cache was used - ledgers, err = archive.GetLedgers(1000, 1002) // all cached - assert.NoError(t, err) - assert.Len(t, ledgers, 3) - - // downloads should not change because of the cache - assert.EqualValues(t, 3, stats.GetDownloads()) - // but requests increase because of 3 fetches to categories - assert.EqualValues(t, 10, stats.GetRequests()) - assert.EqualValues(t, 3, stats.GetCacheHits()) - for i, seq := range []uint32{1000, 1001, 1002} { - ledger := ledgers[seq] - assertXdrEquals(t, ledgerHeaders[i], ledger.Header) - assertXdrEquals(t, transactions[i], ledger.Transaction) - assertXdrEquals(t, results[i], ledger.TransactionResult) - } + return ledgerHeaders, transactions, results } diff --git a/historyarchive/failing_mock_archive.go b/historyarchive/failing_mock_archive.go new file mode 100644 index 0000000000..5966cb30e7 --- /dev/null +++ b/historyarchive/failing_mock_archive.go @@ -0,0 +1,76 @@ +package historyarchive + +import ( + "io" + + "github.com/stellar/go/support/errors" +) + +// FailingMockArchiveBackend is a mocking backend that will fail only when you +// try to read but otherwise behave like MockArchiveBackend. 
+type FailingMockArchiveBackend struct { + files map[string][]byte +} + +func (b *FailingMockArchiveBackend) Exists(pth string) (bool, error) { + _, ok := b.files[pth] + return ok, nil +} + +func (b *FailingMockArchiveBackend) Size(pth string) (int64, error) { + f, ok := b.files[pth] + sz := int64(0) + if ok { + sz = int64(len(f)) + } + return sz, nil +} + +func (b *FailingMockArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { + data, ok := b.files[pth] + if !ok { + return nil, errors.New("file does not exist") + } + + fr := FakeReader{} + fr.data = make([]byte, len(data)) + copy(fr.data[:], data[:]) + return &fr, nil +} + +func (b *FailingMockArchiveBackend) PutFile(pth string, in io.ReadCloser) error { + buf, e := io.ReadAll(in) + if e != nil { + return e + } + b.files[pth] = buf + return nil +} + +func (b *FailingMockArchiveBackend) ListFiles(pth string) (chan string, chan error) { + return nil, nil +} + +func (b *FailingMockArchiveBackend) CanListFiles() bool { + return false +} + +func makeFailingMockBackend(opts ConnectOptions) ArchiveBackend { + b := new(FailingMockArchiveBackend) + b.files = make(map[string][]byte) + return b +} + +type FakeReader struct { + data []byte +} + +func (fr *FakeReader) Read(b []byte) (int, error) { + return 0, io.ErrClosedPipe +} + +func (fr *FakeReader) Close() error { + return nil +} + +var _ io.ReadCloser = &FakeReader{} diff --git a/historyarchive/mocks.go b/historyarchive/mocks.go index fe497ec36e..fa5716e5de 100644 --- a/historyarchive/mocks.go +++ b/historyarchive/mocks.go @@ -137,3 +137,8 @@ func (m *MockArchiveStats) GetCacheHits() uint32 { a := m.Called() return a.Get(0).(uint32) } + +func (m *MockArchiveStats) GetCacheBandwidth() uint64 { + a := m.Called() + return a.Get(0).(uint64) +} diff --git a/historyarchive/stats.go b/historyarchive/stats.go index c182853d1b..6dbf8ceed2 100644 --- a/historyarchive/stats.go +++ b/historyarchive/stats.go @@ -8,6 +8,7 @@ type archiveStats struct { fileDownloads atomic.Uint32 fileUploads atomic.Uint32 cacheHits atomic.Uint32 + cacheBw atomic.Uint64 backendName string } @@ -16,6 +17,7 @@ type ArchiveStats interface { GetDownloads() uint32 GetUploads() uint32 GetCacheHits() uint32 + GetCacheBandwidth() uint64 GetBackendName() string } @@ -37,6 +39,10 @@ func (as *archiveStats) incrementCacheHits() { as.cacheHits.Add(1) } +func (as *archiveStats) incrementCacheBandwidth(bytes int64) { + as.cacheBw.Add(uint64(bytes)) +} + func (as *archiveStats) GetRequests() uint32 { return as.requests.Load() } @@ -55,3 +61,6 @@ func (as *archiveStats) GetBackendName() string { func (as *archiveStats) GetCacheHits() uint32 { return as.cacheHits.Load() } +func (as *archiveStats) GetCacheBandwidth() uint64 { + return as.cacheBw.Load() +} diff --git a/historyarchive/xdrstream.go b/historyarchive/xdrstream.go index de8efc3bb6..313c600f8b 100644 --- a/historyarchive/xdrstream.go +++ b/historyarchive/xdrstream.go @@ -107,7 +107,7 @@ func (x *XdrStream) ExpectedHash() ([sha256.Size]byte, bool) { func (x *XdrStream) Close() error { if x.validateHash { // Read all remaining data from rdr - _, err := io.Copy(ioutil.Discard, x.rdr) + _, err := io.Copy(io.Discard, x.rdr) if err != nil { // close the internal readers to avoid memory leaks x.closeReaders() diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index 07bbf975fa..7d14ca314e 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -407,6 +407,7 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, 
ingestConfig := ingest.Config{ NetworkPassphrase: config.NetworkPassphrase, HistoryArchiveURLs: config.HistoryArchiveURLs, + HistoryArchiveCaching: config.HistoryArchiveCaching, CheckpointFrequency: config.CheckpointFrequency, ReingestEnabled: true, MaxReingestRetries: int(retries), diff --git a/services/horizon/cmd/ingest.go b/services/horizon/cmd/ingest.go index 3833dba7fd..18452dc74a 100644 --- a/services/horizon/cmd/ingest.go +++ b/services/horizon/cmd/ingest.go @@ -128,6 +128,7 @@ var ingestVerifyRangeCmd = &cobra.Command{ NetworkPassphrase: globalConfig.NetworkPassphrase, HistorySession: horizonSession, HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, + HistoryArchiveCaching: globalConfig.HistoryArchiveCaching, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, CheckpointFrequency: globalConfig.CheckpointFrequency, @@ -210,6 +211,7 @@ var ingestStressTestCmd = &cobra.Command{ NetworkPassphrase: globalConfig.NetworkPassphrase, HistorySession: horizonSession, HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, + HistoryArchiveCaching: globalConfig.HistoryArchiveCaching, RoundingSlippageFilter: globalConfig.RoundingSlippageFilter, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, @@ -349,6 +351,7 @@ var ingestBuildStateCmd = &cobra.Command{ NetworkPassphrase: globalConfig.NetworkPassphrase, HistorySession: horizonSession, HistoryArchiveURLs: globalConfig.HistoryArchiveURLs, + HistoryArchiveCaching: globalConfig.HistoryArchiveCaching, CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath, CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB, CheckpointFrequency: globalConfig.CheckpointFrequency, diff --git a/services/horizon/internal/config.go b/services/horizon/internal/config.go index 8fb31075b8..54f843b810 100644 --- a/services/horizon/internal/config.go +++ b/services/horizon/internal/config.go @@ -27,6 +27,7 @@ type Config struct { CaptiveCoreStoragePath string CaptiveCoreReuseStoragePath bool CaptiveCoreConfigUseDB bool + HistoryArchiveCaching bool StellarCoreURL string diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go index eb229c65b2..87deb28c48 100644 --- a/services/horizon/internal/flags.go +++ b/services/horizon/internal/flags.go @@ -51,6 +51,9 @@ const ( NetworkPassphraseFlagName = "network-passphrase" // HistoryArchiveURLsFlagName is the command line flag for specifying the history archive URLs HistoryArchiveURLsFlagName = "history-archive-urls" + // HistoryArchiveCaching is the flag for controlling whether or not there's + // an on-disk cache for history archive downloads + HistoryArchiveCachingFlagName = "history-archive-caching" // NetworkFlagName is the command line flag for specifying the "network" NetworkFlagName = "network" // EnableIngestionFilteringFlagName is the command line flag for enabling the experimental ingestion filtering feature (now enabled by default) @@ -236,11 +239,7 @@ func Flags() (*Config, support.ConfigOptions) { OptType: types.Bool, FlagDefault: true, Required: false, - Usage: `when enabled, Horizon ingestion will instruct the captive - core invocation to use an external db url for ledger states rather than in memory(RAM).\n - Will result in several GB of space shifting out of RAM and to the external db persistence.\n - The external db url is determined by the presence of DATABASE parameter in the captive-core-config-path or\n - or if absent, the db will 
diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go
index e0c667b033..892868e5b9 100644
--- a/services/horizon/internal/ingest/fsm.go
+++ b/services/horizon/internal/ingest/fsm.go
@@ -595,6 +595,11 @@ func addHistoryArchiveStatsMetrics(s *system, stats []historyarchive.ArchiveStat
 				"source": historyServerStat.GetBackendName(),
 				"type":   "cache_hits"}).
 			Add(float64(historyServerStat.GetCacheHits()))
+		s.Metrics().HistoryArchiveStatsCounter.
+			With(prometheus.Labels{
+				"source": historyServerStat.GetBackendName(),
+				"type":   "cache_bandwidth"}).
+			Add(float64(historyServerStat.GetCacheBandwidth()))
 	}
 }
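The bandwidth stat is exported through the same labelled counter as the other archive stats, one time series per archive ("source") and per statistic ("type"). A self-contained sketch of that Prometheus pattern in isolation; the metric name and label values here are illustrative, only the `{source, type}` label scheme comes from the code above:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A counter vector in the same shape as Horizon's
	// HistoryArchiveStatsCounter: labelled by archive backend and stat type.
	statsCounter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "horizon",
		Subsystem: "ingest",
		Name:      "history_archive_stats_total", // illustrative name
	}, []string{"source", "type"})

	// Mirrors the new code path: record cache bandwidth for one backend.
	statsCounter.With(prometheus.Labels{
		"source": "https://history.stellar.org/prd/core-live/core_live_001",
		"type":   "cache_bandwidth",
	}).Add(float64(1 << 20)) // e.g. 1 MiB served from the cache
}
```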
diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go
index 7dfaea366e..91bbf0b10f 100644
--- a/services/horizon/internal/ingest/main.go
+++ b/services/horizon/internal/ingest/main.go
@@ -87,8 +87,9 @@ type Config struct {
 	CaptiveCoreConfigUseDB bool
 	NetworkPassphrase      string
 
-	HistorySession     db.SessionInterface
-	HistoryArchiveURLs []string
+	HistorySession        db.SessionInterface
+	HistoryArchiveURLs    []string
+	HistoryArchiveCaching bool
 
 	DisableStateVerification bool
 	EnableReapLookupTables   bool
@@ -222,6 +223,11 @@ type system struct {
 func NewSystem(config Config) (System, error) {
 	ctx, cancel := context.WithCancel(context.Background())
 
+	cachingPath := ""
+	if config.HistoryArchiveCaching {
+		cachingPath = path.Join(config.CaptiveCoreStoragePath, "bucket-cache")
+	}
+
 	archive, err := historyarchive.NewArchivePool(
 		config.HistoryArchiveURLs,
 		historyarchive.ConnectOptions{
@@ -229,12 +235,7 @@ func NewSystem(config Config) (System, error) {
 			NetworkPassphrase:   config.NetworkPassphrase,
 			CheckpointFrequency: config.CheckpointFrequency,
 			UserAgent:           fmt.Sprintf("horizon/%s golang/%s", apkg.Version(), runtime.Version()),
-			CacheConfig: historyarchive.CacheOptions{
-				Cache:    true,
-				Path:     path.Join(config.CaptiveCoreStoragePath, "bucket-cache"),
-				Log:      log.WithField("subservice", "ha-cache"),
-				MaxFiles: 150,
-			},
+			CachePath: cachingPath,
 		},
 	)
 	if err != nil {
diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go
index f1f8b2ce2a..985391883f 100644
--- a/services/horizon/internal/ingest/resume_state_test.go
+++ b/services/horizon/internal/ingest/resume_state_test.go
@@ -267,6 +267,7 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() {
 	mockStats.On("GetRequests").Return(uint32(0))
 	mockStats.On("GetUploads").Return(uint32(0))
 	mockStats.On("GetCacheHits").Return(uint32(0))
+	mockStats.On("GetCacheBandwidth").Return(uint64(0))
 	s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
 
 	s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).
@@ -384,6 +385,7 @@ func (s *ResumeTestTestSuite) TestReapingObjectsDisabled() {
 	mockStats.On("GetRequests").Return(uint32(0))
 	mockStats.On("GetUploads").Return(uint32(0))
 	mockStats.On("GetCacheHits").Return(uint32(0))
+	mockStats.On("GetCacheBandwidth").Return(uint64(0))
 	s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
 
 	// Reap lookup tables not executed
@@ -434,6 +436,7 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() {
 	mockStats.On("GetRequests").Return(uint32(0))
 	mockStats.On("GetUploads").Return(uint32(0))
 	mockStats.On("GetCacheHits").Return(uint32(0))
+	mockStats.On("GetCacheBandwidth").Return(uint64(0))
 	s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
 
 	next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system)
diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go
index d4b34f9f4d..60ba7b6c2a 100644
--- a/services/horizon/internal/init.go
+++ b/services/horizon/internal/init.go
@@ -97,6 +97,7 @@ func initIngester(app *App) {
 		),
 		NetworkPassphrase:     app.config.NetworkPassphrase,
 		HistoryArchiveURLs:    app.config.HistoryArchiveURLs,
+		HistoryArchiveCaching: app.config.HistoryArchiveCaching,
 		CheckpointFrequency:   app.config.CheckpointFrequency,
 		StellarCoreURL:        app.config.StellarCoreURL,
 		CaptiveCoreBinaryPath: app.config.CaptiveCoreBinaryPath,
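Worth noting in `NewSystem`: an empty `CachePath` now disables the on-disk cache entirely, replacing the old always-on `CacheConfig` block. A hedged sketch of that decision in isolation; the storage path, URL, and checkpoint frequency below are placeholder inputs, while the `ConnectOptions` fields come from the diff above:

```go
package main

import (
	"log"
	"path"

	"github.com/stellar/go/historyarchive"
)

func main() {
	// Placeholder inputs; in Horizon these come from the ingest.Config
	// fields shown above.
	storagePath := "/var/horizon/captive-core"
	caching := true
	urls := []string{"https://history.stellar.org/prd/core-live/core_live_001"}

	// Same decision as NewSystem: an empty CachePath means no cache.
	cachePath := ""
	if caching {
		cachePath = path.Join(storagePath, "bucket-cache")
	}

	pool, err := historyarchive.NewArchivePool(urls, historyarchive.ConnectOptions{
		NetworkPassphrase:   "Public Global Stellar Network ; September 2015",
		CheckpointFrequency: 64,
		CachePath:           cachePath,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = pool
}
```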
From f232a9b6f58f479d5156f87b78ebde08fa65c843 Mon Sep 17 00:00:00 2001
From: urvisavla
Date: Thu, 8 Feb 2024 13:54:16 -0800
Subject: [PATCH 21/21] Update CHANGELOG.md (#5201)

---
 services/horizon/CHANGELOG.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index 193b731298..2fef4a1b82 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -3,7 +3,11 @@ All notable changes to this project will be documented in this
 file. This project adheres to [Semantic Versioning](http://semver.org/).
 
-## Unreleased
+## 2.28.2
+
+### Fixed
+- History archive caching would cause file corruption in certain environments ([5197](https://github.com/stellar/go/pull/5197))
+- Server error in claimable balance API when claimant, asset and cursor query params are supplied ([5200](https://github.com/stellar/go/pull/5200))
 
 ## 2.28.1