diff --git a/.github/workflows/horizon.yml b/.github/workflows/horizon.yml
index 598da76bca..df314900d2 100644
--- a/.github/workflows/horizon.yml
+++ b/.github/workflows/horizon.yml
@@ -33,9 +33,9 @@ jobs:
env:
HORIZON_INTEGRATION_TESTS_ENABLED: true
HORIZON_INTEGRATION_TESTS_CORE_MAX_SUPPORTED_PROTOCOL: ${{ matrix.protocol-version }}
- PROTOCOL_20_CORE_DEBIAN_PKG_VERSION: 20.0.2-1633.669916b56.focal
- PROTOCOL_20_CORE_DOCKER_IMG: stellar/unsafe-stellar-core:20.0.2-1633.669916b56.focal
- PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.0.2-47
+ PROTOCOL_20_CORE_DEBIAN_PKG_VERSION: 20.1.0-1656.114b833e7.focal
+ PROTOCOL_20_CORE_DOCKER_IMG: stellar/unsafe-stellar-core:20.1.0-1656.114b833e7.focal
+ PROTOCOL_20_SOROBAN_RPC_DOCKER_IMG: stellar/soroban-rpc:20.2.0
PROTOCOL_19_CORE_DEBIAN_PKG_VERSION: 19.14.0-1500.5664eff4e.focal
PROTOCOL_19_CORE_DOCKER_IMG: stellar/stellar-core:19.14.0-1500.5664eff4e.focal
PGHOST: localhost
@@ -120,7 +120,7 @@ jobs:
key: ${{ env.COMBINED_SOURCE_HASH }}
- if: ${{ steps.horizon_binary_tests_hash.outputs.cache-hit != 'true' }}
- run: go test -race -timeout 45m -v ./services/horizon/internal/integration/...
+ run: go test -race -timeout 65m -v ./services/horizon/internal/integration/...
- name: Save Horizon binary and integration tests source hash to cache
if: ${{ success() && steps.horizon_binary_tests_hash.outputs.cache-hit != 'true' }}
diff --git a/historyarchive/archive.go b/historyarchive/archive.go
index 1679d2210f..e6a75b69bd 100644
--- a/historyarchive/archive.go
+++ b/historyarchive/archive.go
@@ -10,7 +10,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net/url"
"path"
"regexp"
@@ -46,8 +45,9 @@ type ArchiveOptions struct {
// CheckpointFrequency is the number of ledgers between checkpoints
// if unset, DefaultCheckpointFrequency will be used
CheckpointFrequency uint32
-
storage.ConnectOptions
+ // CacheConfig controls whether and how bucket files are cached on disk.
+ CacheConfig CacheOptions
}
type Ledger struct {
@@ -75,6 +75,7 @@ type ArchiveInterface interface {
GetXdrStreamForHash(hash Hash) (*XdrStream, error)
GetXdrStream(pth string) (*XdrStream, error)
GetCheckpointManager() CheckpointManager
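+ // GetStats exposes usage counters (requests, downloads, uploads, cache hits) for the archive.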
+ GetStats() []ArchiveStats
}
var _ ArchiveInterface = &Archive{}
@@ -103,6 +104,12 @@ type Archive struct {
checkpointManager CheckpointManager
backend storage.Storage
+ cache *ArchiveBucketCache
+ stats archiveStats
+}
+
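+// GetStats returns this archive's stats as a one-element slice, matching the
+// shape returned by an ArchivePool.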
+func (arch *Archive) GetStats() []ArchiveStats {
+ return []ArchiveStats{&arch.stats}
}
func (arch *Archive) GetCheckpointManager() CheckpointManager {
@@ -112,6 +119,7 @@ func (arch *Archive) GetCheckpointManager() CheckpointManager {
func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) {
var has HistoryArchiveState
rdr, err := a.backend.GetFile(path)
+ a.stats.incrementDownloads()
if err != nil {
return has, err
}
@@ -138,6 +146,7 @@ func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) {
func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error {
exists, err := a.backend.Exists(path)
+ a.stats.incrementRequests()
if err != nil {
return err
}
@@ -149,19 +158,21 @@ func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *Command
if err != nil {
return err
}
- return a.backend.PutFile(path,
- ioutil.NopCloser(bytes.NewReader(buf)))
+ a.stats.incrementUploads()
+ return a.backend.PutFile(path, io.NopCloser(bytes.NewReader(buf)))
}
func (a *Archive) BucketExists(bucket Hash) (bool, error) {
- return a.backend.Exists(BucketPath(bucket))
+ return a.cachedExists(BucketPath(bucket))
}
func (a *Archive) BucketSize(bucket Hash) (int64, error) {
+ a.stats.incrementRequests()
return a.backend.Size(BucketPath(bucket))
}
func (a *Archive) CategoryCheckpointExists(cat string, chk uint32) (bool, error) {
+ a.stats.incrementRequests()
return a.backend.Exists(CategoryCheckpointPath(cat, chk))
}
@@ -294,14 +305,17 @@ func (a *Archive) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) erro
}
func (a *Archive) ListBucket(dp DirPrefix) (chan string, chan error) {
+ a.stats.incrementRequests()
return a.backend.ListFiles(path.Join("bucket", dp.Path()))
}
func (a *Archive) ListAllBuckets() (chan string, chan error) {
+ a.stats.incrementRequests()
return a.backend.ListFiles("bucket")
}
func (a *Archive) ListAllBucketHashes() (chan Hash, chan error) {
+ a.stats.incrementRequests()
sch, errs := a.backend.ListFiles("bucket")
ch := make(chan Hash)
rx := regexp.MustCompile("bucket" + hexPrefixPat + "bucket-([0-9a-f]{64})\\.xdr\\.gz$")
@@ -322,6 +336,7 @@ func (a *Archive) ListCategoryCheckpoints(cat string, pth string) (chan uint32,
ext := categoryExt(cat)
rx := regexp.MustCompile(cat + hexPrefixPat + cat +
"-([0-9a-f]{8})\\." + regexp.QuoteMeta(ext) + "$")
+ a.stats.incrementRequests()
sch, errs := a.backend.ListFiles(path.Join(cat, pth))
ch := make(chan uint32)
errs = makeErrorPump(errs)
@@ -359,13 +374,42 @@ func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) {
if !strings.HasSuffix(pth, ".xdr.gz") {
return nil, errors.New("File has non-.xdr.gz suffix: " + pth)
}
- rdr, err := a.backend.GetFile(pth)
+ rdr, err := a.cachedGet(pth)
if err != nil {
return nil, err
}
return NewXdrGzStream(rdr)
}
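+// cachedGet reads a file through the on-disk cache when one is configured:
+// hits count as cache hits, misses as downloads, and any cache error evicts
+// the entry and falls back to the raw backend.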
+func (a *Archive) cachedGet(pth string) (io.ReadCloser, error) {
+ if a.cache != nil {
+ rdr, foundInCache, err := a.cache.GetFile(pth, a.backend)
+ if !foundInCache {
+ a.stats.incrementDownloads()
+ } else {
+ a.stats.incrementCacheHits()
+ }
+ if err == nil {
+ return rdr, nil
+ }
+
+ // If there's an error, retry with the uncached backend.
+ a.cache.Evict(pth)
+ }
+
+ a.stats.incrementDownloads()
+ return a.backend.GetFile(pth)
+}
+
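+// cachedExists treats a cache hit as proof of existence; only a fall-through
+// to the backend counts as a request.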
+func (a *Archive) cachedExists(pth string) (bool, error) {
+ if a.cache != nil && a.cache.Exists(pth) {
+ return true, nil
+ }
+
+ a.stats.incrementRequests()
+ return a.backend.Exists(pth)
+}
+
func Connect(u string, opts ArchiveOptions) (*Archive, error) {
arch := Archive{
networkPassphrase: opts.NetworkPassphrase,
@@ -390,7 +434,21 @@ func Connect(u string, opts ArchiveOptions) (*Archive, error) {
var err error
arch.backend, err = ConnectBackend(u, opts.ConnectOptions)
- return &arch, err
+ if err != nil {
+ return &arch, err
+ }
+
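+ // Wrap the backend with an on-disk cache when the caller enabled one.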
+ if opts.CacheConfig.Cache {
+ cache, innerErr := MakeArchiveBucketCache(opts.CacheConfig)
+ if innerErr != nil {
+ return &arch, innerErr
+ }
+
+ arch.cache = cache
+ }
+
+ arch.stats = archiveStats{backendName: u}
+ return &arch, nil
}
func ConnectBackend(u string, opts storage.ConnectOptions) (storage.Storage, error) {
@@ -398,12 +456,14 @@ func ConnectBackend(u string, opts storage.ConnectOptions) (storage.Storage, err
return nil, errors.New("URL is empty")
}
+ var err error
parsed, err := url.Parse(u)
if err != nil {
return nil, err
}
var backend storage.Storage
+
if parsed.Scheme == "mock" {
backend = makeMockBackend()
} else {
diff --git a/historyarchive/archive_cache.go b/historyarchive/archive_cache.go
new file mode 100644
index 0000000000..50b15b958c
--- /dev/null
+++ b/historyarchive/archive_cache.go
@@ -0,0 +1,226 @@
+package historyarchive
+
+import (
+ "io"
+ "os"
+ "path"
+
+ lru "github.com/hashicorp/golang-lru"
+ log "github.com/stellar/go/support/log"
+ "github.com/stellar/go/support/storage"
+)
+
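+// CacheOptions configures the optional on-disk cache of history archive
+// bucket files.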
+type CacheOptions struct {
+ Cache bool
+
+ Path string
+ MaxFiles uint
+ Log *log.Entry
+}
+
+type ArchiveBucketCache struct {
+ path string
+ lru *lru.Cache
+ log *log.Entry
+}
+
+// MakeArchiveBucketCache creates an on-disk cache at the given path that
+// acts as an LRU cache for files fetched from a particular upstream.
+func MakeArchiveBucketCache(opts CacheOptions) (*ArchiveBucketCache, error) {
+ log_ := opts.Log
+ if opts.Log == nil {
+ log_ = log.WithField("subservice", "fs-cache")
+ }
+ log_ = log_.
+ WithField("path", opts.Path).
+ WithField("cap", opts.MaxFiles)
+
+ if _, err := os.Stat(opts.Path); err == nil || os.IsExist(err) {
+ log_.Warnf("Cache directory already exists, removing")
+ os.RemoveAll(opts.Path)
+ }
+
+ backend := &ArchiveBucketCache{
+ path: opts.Path,
+ log: log_,
+ }
+
+ cache, err := lru.NewWithEvict(int(opts.MaxFiles), backend.onEviction)
+ if err != nil {
+ return &ArchiveBucketCache{}, err
+ }
+ backend.lru = cache
+
+ log_.Info("Bucket cache initialized")
+ return backend, nil
+}
+
+// GetFile retrieves the file contents from the local cache if present.
+// Otherwise, it returns the same result as the upstream, adding that result
+// into the local cache if possible. It returns a 3-tuple of a reader (which may
+// be nil on an error), an indication of whether or not it was *found* in the
+// cache, and any error.
+func (abc *ArchiveBucketCache) GetFile(
+ filepath string,
+ upstream storage.Storage,
+) (io.ReadCloser, bool, error) {
+ L := abc.log.WithField("key", filepath)
+ localPath := path.Join(abc.path, filepath)
+
+ // If the lockfile exists, we should defer to the remote source but *not*
+ // update the cache, as it means there's an in-progress sync of the same
+ // file.
+ _, statErr := os.Stat(NameLockfile(localPath))
+ if statErr == nil || os.IsExist(statErr) {
+ L.Info("Incomplete file in on-disk cache: deferring")
+ reader, err := upstream.GetFile(filepath)
+ return reader, false, err
+ } else if _, ok := abc.lru.Get(localPath); !ok {
+ L.Info("File does not exist in the cache: downloading")
+
+ // Since it's not on-disk, pull it from the remote backend, shove it
+ // into the cache, and write it to disk.
+ remote, err := upstream.GetFile(filepath)
+ if err != nil {
+ return remote, false, err
+ }
+
+ local, err := abc.createLocal(filepath)
+ if err != nil {
+ // If there's some local FS error, we can still continue with the
+ // remote version, so just log it and continue.
+ L.WithError(err).Warn("Creating cache file failed")
+ return remote, false, nil
+ }
+
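+ // Tee the remote stream into the local cache file as the caller reads it,
+ // removing the lockfile once the download completes.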
+ return teeReadCloser(remote, local, func() error {
+ L.Debug("Download complete: removing lockfile")
+ return os.Remove(NameLockfile(localPath))
+ }), false, nil
+ }
+
+ L.Info("Found file in cache")
+ // The cache claims it exists, so just give it a read and send it.
+ local, err := os.Open(localPath)
+ if err != nil {
+ // Uh-oh, the cache and the disk are not in sync somehow? Let's evict
+ // this value and try again (recurse) w/ the remote version.
+ L.WithError(err).Warn("Opening cached file failed")
+ abc.lru.Remove(localPath)
+ return abc.GetFile(filepath, upstream)
+ }
+
+ return local, true, nil
+}
+
+func (abc *ArchiveBucketCache) Exists(filepath string) bool {
+ localPath := path.Join(abc.path, filepath)
+
+ // First, check if the file exists in the cache.
+ if abc.lru.Contains(localPath) {
+ return true
+ }
+
+ // If it doesn't, it may still exist on disk, and checking there is
+ // cheaper than going upstream.
+ //
+ // Note that this means the cache and disk are out of sync (perhaps due to
+ // other archives using the same cache location) so we can update it. This
+ // situation is well-handled by `GetFile`.
+ _, statErr := os.Stat(localPath)
+ if statErr == nil || os.IsExist(statErr) {
+ abc.lru.Add(localPath, struct{}{})
+ return true
+ }
+
+ return false
+}
+
+// Close purges the cache and cleans up the filesystem.
+func (abc *ArchiveBucketCache) Close() error {
+ abc.lru.Purge()
+ return os.RemoveAll(abc.path)
+}
+
+// Evict removes a file from the cache and the filesystem.
+func (abc *ArchiveBucketCache) Evict(filepath string) {
+ log.WithField("key", filepath).Info("Evicting file from the disk")
+ abc.lru.Remove(path.Join(abc.path, filepath))
+}
+
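+// onEviction is the LRU callback: it best-effort deletes the evicted file
+// (and any leftover lockfile) from disk, logging removal failures.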
+func (abc *ArchiveBucketCache) onEviction(key, value interface{}) {
+ path := key.(string)
+ os.Remove(NameLockfile(path)) // just in case
+ if err := os.Remove(path); err != nil { // best effort removal
+ abc.log.WithError(err).
+ WithField("key", path).
+ Warn("Removal failed after cache eviction")
+ }
+}
+
+func (abc *ArchiveBucketCache) createLocal(filepath string) (*os.File, error) {
+ localPath := path.Join(abc.path, filepath)
+ if err := os.MkdirAll(path.Dir(localPath), 0755 /* drwxr-xr-x */); err != nil {
+ return nil, err
+ }
+
+ local, err := os.Create(localPath) /* mode -rw-rw-rw- */
+ if err != nil {
+ return nil, err
+ }
+ _, err = os.Create(NameLockfile(localPath))
+ if err != nil {
+ return nil, err
+ }
+
+ abc.lru.Add(localPath, struct{}{}) // we only use the LRU as a set of keys
+ return local, nil
+}
+
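+// NameLockfile derives the lockfile path that marks an in-progress download.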
+func NameLockfile(file string) string {
+ return file + ".lock"
+}
+
+// The below is a helper type so that we can use io.TeeReader to write
+// data locally immediately as we read it remotely.
+
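+// trc pairs a Reader with a custom close function so that the tee'd local
+// file can be closed alongside the remote stream.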
+type trc struct {
+ io.Reader
+ close func() error
+ closed bool // prevents a double-close
+}
+
+func (t trc) Close() error {
+ if t.closed {
+ return nil
+ }
+
+ return t.close()
+}
+
+func teeReadCloser(r io.ReadCloser, w io.WriteCloser, onClose func() error) io.ReadCloser {
+ closer := trc{
+ Reader: io.TeeReader(r, w),
+ closed: false,
+ }
+ closer.close = func() error {
+ if closer.closed {
+ return nil
+ }
+
+ // Always run all closers, but return the first error
+ err1 := r.Close()
+ err2 := w.Close()
+ err3 := onClose()
+
+ closer.closed = true
+ if err1 != nil {
+ return err1
+ } else if err2 != nil {
+ return err2
+ }
+ return err3
+ }
+
+ return closer
+}
diff --git a/historyarchive/archive_pool.go b/historyarchive/archive_pool.go
index e4e24e0853..4cb5483f63 100644
--- a/historyarchive/archive_pool.go
+++ b/historyarchive/archive_pool.go
@@ -51,8 +51,16 @@ func NewArchivePool(archiveURLs []string, opts ArchiveOptions) (ArchivePool, err
return validArchives, nil
}
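+// GetStats aggregates the stats reported by every archive in the pool.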
+func (pa ArchivePool) GetStats() []ArchiveStats {
+ stats := []ArchiveStats{}
+ for _, archive := range pa {
+ stats = append(stats, archive.GetStats()...)
+ }
+ return stats
+}
+
// Ensure the pool conforms to the ArchiveInterface
-var _ ArchiveInterface = ArchivePool{}
+var _ ArchiveInterface = &ArchivePool{}
// Below are the ArchiveInterface method implementations.
diff --git a/historyarchive/archive_test.go b/historyarchive/archive_test.go
index 4f90802bf7..de34c36f68 100644
--- a/historyarchive/archive_test.go
+++ b/historyarchive/archive_test.go
@@ -13,7 +13,10 @@ import (
"io"
"io/ioutil"
"math/big"
+ "net/http"
+ "net/http/httptest"
"os"
+ "path/filepath"
"strings"
"testing"
@@ -46,7 +49,13 @@ func GetTestS3Archive() *Archive {
}
func GetTestMockArchive() *Archive {
- return MustConnect("mock://test", ArchiveOptions{CheckpointFrequency: DefaultCheckpointFrequency})
+ return MustConnect("mock://test",
+ ArchiveOptions{CheckpointFrequency: 64,
+ CacheConfig: CacheOptions{
+ Cache: true,
+ Path: filepath.Join(os.TempDir(), "history-archive-test-cache"),
+ MaxFiles: 5,
+ }})
}
var tmpdirs []string
@@ -183,6 +192,27 @@ func TestScan(t *testing.T) {
GetRandomPopulatedArchive().Scan(opts)
}
+func TestConfiguresHttpUserAgent(t *testing.T) {
+ var userAgent string
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ userAgent = r.Header["User-Agent"][0]
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer server.Close()
+
+ archive, err := Connect(server.URL, ArchiveOptions{
+ ConnectOptions: storage.ConnectOptions{
+ UserAgent: "uatest",
+ },
+ })
+ assert.NoError(t, err)
+
+ ok, err := archive.BucketExists(EmptyXdrArrayHash())
+ assert.True(t, ok)
+ assert.NoError(t, err)
+ assert.Equal(t, "uatest", userAgent)
+}
+
func TestScanSize(t *testing.T) {
defer cleanup()
opts := testOptions()
@@ -530,6 +560,8 @@ func assertXdrEquals(t *testing.T, a, b xdrEntry) {
func TestGetLedgers(t *testing.T) {
archive := GetTestMockArchive()
_, err := archive.GetLedgers(1000, 1002)
+ assert.Equal(t, uint32(1), archive.GetStats()[0].GetRequests())
+ assert.Equal(t, uint32(0), archive.GetStats()[0].GetDownloads())
assert.EqualError(t, err, "checkpoint 1023 is not published")
ledgerHeaders := []xdr.LedgerHeaderHistoryEntry{
@@ -614,9 +646,32 @@ func TestGetLedgers(t *testing.T) {
[]xdrEntry{results[0], results[1], results[2]},
)
+ stats := archive.GetStats()[0]
ledgers, err := archive.GetLedgers(1000, 1002)
+
assert.NoError(t, err)
assert.Len(t, ledgers, 3)
+ // requests started at 1 and incurred 6 more: 3 category queries and 3 downloads
+ assert.EqualValues(t, 7, stats.GetRequests())
+ // downloads started at 0; this call added 3 file downloads
+ assert.EqualValues(t, 3, stats.GetDownloads())
+ for i, seq := range []uint32{1000, 1001, 1002} {
+ ledger := ledgers[seq]
+ assertXdrEquals(t, ledgerHeaders[i], ledger.Header)
+ assertXdrEquals(t, transactions[i], ledger.Transaction)
+ assertXdrEquals(t, results[i], ledger.TransactionResult)
+ }
+
+ // Repeat the same check but ensure the cache was used
+ ledgers, err = archive.GetLedgers(1000, 1002) // all cached
+ assert.NoError(t, err)
+ assert.Len(t, ledgers, 3)
+
+ // downloads should not change because of the cache
+ assert.EqualValues(t, 3, stats.GetDownloads())
+ // but requests still increase, due to the 3 category queries
+ assert.EqualValues(t, 10, stats.GetRequests())
+ assert.EqualValues(t, 3, stats.GetCacheHits())
for i, seq := range []uint32{1000, 1001, 1002} {
ledger := ledgers[seq]
assertXdrEquals(t, ledgerHeaders[i], ledger.Header)
diff --git a/historyarchive/mocks.go b/historyarchive/mocks.go
index 3952211cd3..fe497ec36e 100644
--- a/historyarchive/mocks.go
+++ b/historyarchive/mocks.go
@@ -103,3 +103,37 @@ func (m *MockArchive) GetXdrStream(pth string) (*XdrStream, error) {
a := m.Called(pth)
return a.Get(0).(*XdrStream), a.Error(1)
}
+
+func (m *MockArchive) GetStats() []ArchiveStats {
+ a := m.Called()
+ return a.Get(0).([]ArchiveStats)
+}
+
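+// MockArchiveStats is a testify mock satisfying the ArchiveStats interface.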
+type MockArchiveStats struct {
+ mock.Mock
+}
+
+func (m *MockArchiveStats) GetRequests() uint32 {
+ a := m.Called()
+ return a.Get(0).(uint32)
+}
+
+func (m *MockArchiveStats) GetDownloads() uint32 {
+ a := m.Called()
+ return a.Get(0).(uint32)
+}
+
+func (m *MockArchiveStats) GetUploads() uint32 {
+ a := m.Called()
+ return a.Get(0).(uint32)
+}
+
+func (m *MockArchiveStats) GetBackendName() string {
+ a := m.Called()
+ return a.Get(0).(string)
+}
+
+func (m *MockArchiveStats) GetCacheHits() uint32 {
+ a := m.Called()
+ return a.Get(0).(uint32)
+}
diff --git a/historyarchive/stats.go b/historyarchive/stats.go
new file mode 100644
index 0000000000..c182853d1b
--- /dev/null
+++ b/historyarchive/stats.go
@@ -0,0 +1,57 @@
+package historyarchive
+
+import "sync/atomic"
+
+// The atomic counters below wrap back to 0 if an addition overflows.
+type archiveStats struct {
+ requests atomic.Uint32
+ fileDownloads atomic.Uint32
+ fileUploads atomic.Uint32
+ cacheHits atomic.Uint32
+ backendName string
+}
+
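+// ArchiveStats is the read-only view of an archive's usage counters.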
+type ArchiveStats interface {
+ GetRequests() uint32
+ GetDownloads() uint32
+ GetUploads() uint32
+ GetCacheHits() uint32
+ GetBackendName() string
+}
+
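+// Downloads and uploads also count as requests, so the request counter is
+// always >= downloads + uploads.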
+func (as *archiveStats) incrementDownloads() {
+ as.fileDownloads.Add(1)
+ as.incrementRequests()
+}
+
+func (as *archiveStats) incrementUploads() {
+ as.fileUploads.Add(1)
+ as.incrementRequests()
+}
+
+func (as *archiveStats) incrementRequests() {
+ as.requests.Add(1)
+}
+
+func (as *archiveStats) incrementCacheHits() {
+ as.cacheHits.Add(1)
+}
+
+func (as *archiveStats) GetRequests() uint32 {
+ return as.requests.Load()
+}
+
+func (as *archiveStats) GetDownloads() uint32 {
+ return as.fileDownloads.Load()
+}
+
+func (as *archiveStats) GetUploads() uint32 {
+ return as.fileUploads.Load()
+}
+
+func (as *archiveStats) GetBackendName() string {
+ return as.backendName
+}
+
+func (as *archiveStats) GetCacheHits() uint32 {
+ return as.cacheHits.Load()
+}
diff --git a/historyarchive/xdrstream.go b/historyarchive/xdrstream.go
index e0d9745585..de8efc3bb6 100644
--- a/historyarchive/xdrstream.go
+++ b/historyarchive/xdrstream.go
@@ -134,11 +134,7 @@ func (x *XdrStream) closeReaders() error {
err = err2
}
}
- if x.rdr2 != nil {
- if err2 := x.rdr2.Close(); err2 != nil {
- err = err2
- }
- }
+
if x.gzipReader != nil {
if err2 := x.gzipReader.Close(); err2 != nil {
err = err2
diff --git a/ingest/ledgerbackend/captive_core_backend.go b/ingest/ledgerbackend/captive_core_backend.go
index ce8da8f8cd..bc29acb54b 100644
--- a/ingest/ledgerbackend/captive_core_backend.go
+++ b/ingest/ledgerbackend/captive_core_backend.go
@@ -222,7 +222,6 @@ func (c *CaptiveStellarCore) coreSyncedMetric() float64 {
info, err := c.stellarCoreClient.Info(c.config.Context)
if err != nil {
- c.config.Log.WithError(err).Warn("Cannot connect to Captive Stellar-Core HTTP server")
return -1
}
@@ -240,7 +239,6 @@ func (c *CaptiveStellarCore) coreVersionMetric() float64 {
info, err := c.stellarCoreClient.Info(c.config.Context)
if err != nil {
- c.config.Log.WithError(err).Warn("Cannot connect to Captive Stellar-Core HTTP server")
return -1
}
diff --git a/ingest/ledgerbackend/captive_core_backend_test.go b/ingest/ledgerbackend/captive_core_backend_test.go
index fb2ea4eff4..5178fd97a1 100644
--- a/ingest/ledgerbackend/captive_core_backend_test.go
+++ b/ingest/ledgerbackend/captive_core_backend_test.go
@@ -4,6 +4,8 @@ import (
"context"
"encoding/hex"
"fmt"
+ "net/http"
+ "net/http/httptest"
"os"
"sync"
"testing"
@@ -138,9 +140,16 @@ func TestCaptiveNew(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(storagePath)
+ var userAgent string
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ userAgent = r.Header["User-Agent"][0]
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer server.Close()
+
executablePath := "/etc/stellar-core"
networkPassphrase := network.PublicNetworkPassphrase
- historyURLs := []string{"http://history.stellar.org/prd/core-live/core_live_001"}
+ historyURLs := []string{server.URL}
captiveStellarCore, err := NewCaptive(
CaptiveCoreConfig{
@@ -148,12 +157,16 @@ func TestCaptiveNew(t *testing.T) {
NetworkPassphrase: networkPassphrase,
HistoryArchiveURLs: historyURLs,
StoragePath: storagePath,
+ UserAgent: "uatest",
},
)
assert.NoError(t, err)
assert.Equal(t, uint32(0), captiveStellarCore.nextLedger)
assert.NotNil(t, captiveStellarCore.archive)
+ _, err = captiveStellarCore.archive.BucketExists(historyarchive.EmptyXdrArrayHash())
+ assert.NoError(t, err)
+ assert.Equal(t, "uatest", userAgent)
}
func TestCaptivePrepareRange(t *testing.T) {
diff --git a/ingest/ledgerbackend/toml.go b/ingest/ledgerbackend/toml.go
index 55e36e9b9f..e2234fc1f2 100644
--- a/ingest/ledgerbackend/toml.go
+++ b/ingest/ledgerbackend/toml.go
@@ -559,7 +559,7 @@ func (c *CaptiveCoreToml) setDefaults(params CaptiveCoreTomlParams) {
for i, val := range params.HistoryArchiveURLs {
name := fmt.Sprintf("HISTORY.h%d", i)
c.HistoryEntries[c.tablePlaceholders.newPlaceholder(name)] = History{
- Get: fmt.Sprintf("curl -sf %s/{0} -o {1}", val),
+ Get: fmt.Sprintf("curl -sf %s/{0} -o {1}", strings.TrimSuffix(val, "/")),
}
}
}
diff --git a/ingest/ledgerbackend/toml_test.go b/ingest/ledgerbackend/toml_test.go
index 476a2ea953..c5d40c77e3 100644
--- a/ingest/ledgerbackend/toml_test.go
+++ b/ingest/ledgerbackend/toml_test.go
@@ -395,6 +395,28 @@ func TestGenerateConfig(t *testing.T) {
}
}
+func TestHistoryArchiveURLTrailingSlash(t *testing.T) {
+ httpPort := uint(8000)
+ peerPort := uint(8000)
+ logPath := "logPath"
+
+ params := CaptiveCoreTomlParams{
+ NetworkPassphrase: "Public Global Stellar Network ; September 2015",
+ HistoryArchiveURLs: []string{"http://localhost:1170/"},
+ HTTPPort: &httpPort,
+ PeerPort: &peerPort,
+ LogPath: &logPath,
+ Strict: false,
+ }
+
+ captiveCoreToml, err := NewCaptiveCoreToml(params)
+ assert.NoError(t, err)
+ assert.Len(t, captiveCoreToml.HistoryEntries, 1)
+ for _, entry := range captiveCoreToml.HistoryEntries {
+ assert.Equal(t, "curl -sf http://localhost:1170/{0} -o {1}", entry.Get)
+ }
+}
+
func TestExternalStorageConfigUsesDatabaseToml(t *testing.T) {
var err error
var captiveCoreToml *CaptiveCoreToml
diff --git a/ingest/verify/main.go b/ingest/verify/main.go
index 4b97ffc2f7..6110448723 100644
--- a/ingest/verify/main.go
+++ b/ingest/verify/main.go
@@ -66,7 +66,7 @@ func (v *StateVerifier) GetLedgerEntries(count int) ([]xdr.LedgerEntry, error) {
}
entries := make([]xdr.LedgerEntry, 0, count)
- v.currentEntries = make(map[string]xdr.LedgerEntry)
+ v.currentEntries = make(map[string]xdr.LedgerEntry, count)
for count > 0 {
entryChange, err := v.stateReader.Read()
diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md
index 619c1f40e7..13bcbe92b2 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -5,10 +5,41 @@ file. This project adheres to [Semantic Versioning](http://semver.org/).
## Unreleased
+## 2.28.0
+
+### Fixed
+- Ingestion performance improvements ([4909](https://github.com/stellar/go/issues/4909))
+- Trade aggregation rebuild errors reported on `db reingest range` with parallel workers ([5168](https://github.com/stellar/go/pull/5168))
+- Limited global flags displayed on cli help output ([5077](https://github.com/stellar/go/pull/5077))
+- Network usage has been significantly reduced with caching. **Warning:** To support the cache, disk requirements may increase by up to 15GB ([5171](https://github.com/stellar/go/pull/5171)).
+
### Added
+- We now include metrics for history archive requests ([5166](https://github.com/stellar/go/pull/5166))
+- HTTP history archive requests now include a unique user agent ([5166](https://github.com/stellar/go/pull/5166))
+- Added a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
+- New optional config `DISABLE_SOROBAN_INGEST` ([5175](https://github.com/stellar/go/issues/5175)). Defaults to `FALSE`; when `TRUE` and a Soroban transaction is ingested, the following will occur (see the example after this list):
+ * no effects will be generated for contract invocations.
+ * the `history_transactions.tx_meta` column will have serialized xdr that equates to an empty `xdr.TransactionMeta.V3`: `Operations`, `TxChangesAfter`, and `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
+ * the API transaction model's `result_meta_xdr` will have the same empty serialized xdr for `xdr.TransactionMeta.V3`: `Operations`, `TxChangesAfter`, and `TxChangesBefore` will be empty arrays and `SorobanMeta` will be nil.
+ * the API `Operation` model for the `InvokeHostFunctionOp` type will have empty `asset_balance_changes`
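+ For example, a minimal sketch of enabling it (assuming the `horizon` binary; the env var and flag forms are equivalent):
+ ```
+ # environment variable form
+ DISABLE_SOROBAN_INGEST=true horizon
+ # flag form
+ horizon --disable-soroban-ingest=true
+ ```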
+
+### Breaking Changes
+- Deprecation of legacy, non-captive core ingestion ([5158](https://github.com/stellar/go/pull/5158)):
+ * removed the configuration flags `--stellar-core-db-url`, `--cursor-name`, and `--skip-cursor-update`; they are no longer usable.
+ * removed automatic updating of the core cursor from ingestion background processing.
+ **Note** for upgrading existing Horizon deployments: since Horizon will no longer advance this cursor on core, the cursor may need to be removed manually from the core process that your Horizon was using, otherwise that core process may unnecessarily retain older data in buckets on disk, up to the last cursor ledger sequence set by the prior Horizon release.
+
+ To determine whether the core process used by your Horizon deployment has cursors to clean up: if `NETWORK` is set, or `STELLAR_CORE_URL` is set, or `CAPTIVE_CORE_HTTP_PORT` is set to a non-zero value, or `CAPTIVE_CORE_CONFIG_PATH` is used and the toml has `HTTP_PORT` set to non-zero and `PUBLIC_HTTP_PORT` not set to false, then it is recommended to perform the following preventative measure on the machine hosting Horizon after upgrading to 2.28.0 and restarting the process:
+ ```
+ $ curl http:///getcursor
+ # If there are no cursors reported, done, no need for any action
+ # If any horizon cursors exist they need to be dropped by id.
+ # By default horizon sets cursor id to "HORIZON" but if it was customized
+ # using the --cursor-name flag the id might be different
+ $ curl http:///dropcursor?id=
+ ```
+
-- Add a deprecation warning for using command-line flags when running Horizon ([5051](https://github.com/stellar/go/pull/5051))
-- Deprecate configuration flags related to legacy non-captive core ingestion ([5100](https://github.com/stellar/go/pull/5100))
## 2.27.0
### Fixed
diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go
index a83597932e..07bbf975fa 100644
--- a/services/horizon/cmd/db.go
+++ b/services/horizon/cmd/db.go
@@ -413,14 +413,13 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool,
ReingestRetryBackoffSeconds: int(retryBackoffSeconds),
CaptiveCoreBinaryPath: config.CaptiveCoreBinaryPath,
CaptiveCoreConfigUseDB: config.CaptiveCoreConfigUseDB,
- RemoteCaptiveCoreURL: config.RemoteCaptiveCoreURL,
CaptiveCoreToml: config.CaptiveCoreToml,
CaptiveCoreStoragePath: config.CaptiveCoreStoragePath,
- StellarCoreCursor: config.CursorName,
StellarCoreURL: config.StellarCoreURL,
RoundingSlippageFilter: config.RoundingSlippageFilter,
EnableIngestionFiltering: config.EnableIngestionFiltering,
MaxLedgerPerFlush: maxLedgersPerFlush,
+ SkipSorobanIngestion: config.SkipSorobanIngestion,
}
if ingestConfig.HistorySession, err = db.Open("postgres", config.DatabaseURL); err != nil {
@@ -445,7 +444,7 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool,
}
defer system.Shutdown()
- err = system.ReingestRange(ledgerRanges, reingestForce)
+ err = system.ReingestRange(ledgerRanges, reingestForce, true)
if err != nil {
if _, ok := errors.Cause(err).(ingest.ErrReingestRangeConflict); ok {
return fmt.Errorf(`The range you have provided overlaps with Horizon's most recently ingested ledger.
diff --git a/services/horizon/cmd/ingest.go b/services/horizon/cmd/ingest.go
index e2d38977ab..3833dba7fd 100644
--- a/services/horizon/cmd/ingest.go
+++ b/services/horizon/cmd/ingest.go
@@ -130,7 +130,6 @@ var ingestVerifyRangeCmd = &cobra.Command{
HistoryArchiveURLs: globalConfig.HistoryArchiveURLs,
CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath,
CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB,
- RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL,
CheckpointFrequency: globalConfig.CheckpointFrequency,
CaptiveCoreToml: globalConfig.CaptiveCoreToml,
CaptiveCoreStoragePath: globalConfig.CaptiveCoreStoragePath,
@@ -213,7 +212,6 @@ var ingestStressTestCmd = &cobra.Command{
HistoryArchiveURLs: globalConfig.HistoryArchiveURLs,
RoundingSlippageFilter: globalConfig.RoundingSlippageFilter,
CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath,
- RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL,
CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB,
}
@@ -353,7 +351,6 @@ var ingestBuildStateCmd = &cobra.Command{
HistoryArchiveURLs: globalConfig.HistoryArchiveURLs,
CaptiveCoreBinaryPath: globalConfig.CaptiveCoreBinaryPath,
CaptiveCoreConfigUseDB: globalConfig.CaptiveCoreConfigUseDB,
- RemoteCaptiveCoreURL: globalConfig.RemoteCaptiveCoreURL,
CheckpointFrequency: globalConfig.CheckpointFrequency,
CaptiveCoreToml: globalConfig.CaptiveCoreToml,
CaptiveCoreStoragePath: globalConfig.CaptiveCoreStoragePath,
diff --git a/services/horizon/internal/config.go b/services/horizon/internal/config.go
index 1cc14b4900..8fb31075b8 100644
--- a/services/horizon/internal/config.go
+++ b/services/horizon/internal/config.go
@@ -21,7 +21,6 @@ type Config struct {
EnableIngestionFiltering bool
CaptiveCoreBinaryPath string
- RemoteCaptiveCoreURL string
CaptiveCoreConfigPath string
CaptiveCoreTomlParams ledgerbackend.CaptiveCoreTomlParams
CaptiveCoreToml *ledgerbackend.CaptiveCoreToml
@@ -68,11 +67,6 @@ type Config struct {
TLSKey string
// Ingest toggles whether this horizon instance should run the data ingestion subsystem.
Ingest bool
- // CursorName is the cursor used for ingesting from stellar-core.
- // Setting multiple cursors in different Horizon instances allows multiple
- // Horizons to ingest from the same stellar-core instance without cursor
- // collisions.
- CursorName string
// HistoryRetentionCount represents the minimum number of ledgers worth of
// history data to retain in the horizon database. For the purposes of
// determining a "retention duration", each ledger roughly corresponds to 10
@@ -82,9 +76,6 @@ type Config struct {
// out-of-date by before horizon begins to respond with an error to history
// requests.
StaleThreshold uint
- // SkipCursorUpdate causes the ingestor to skip reporting the "last imported
- // ledger" state to stellar-core.
- SkipCursorUpdate bool
// IngestDisableStateVerification disables state verification
// `System.verifyState()` when set to `true`.
IngestDisableStateVerification bool
@@ -117,4 +108,6 @@ type Config struct {
Network string
// DisableTxSub disables transaction submission functionality for Horizon.
DisableTxSub bool
+ // SkipSorobanIngestion skips Soroban related ingestion processing.
+ SkipSorobanIngestion bool
}
diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go
index e2783680fd..eb229c65b2 100644
--- a/services/horizon/internal/flags.go
+++ b/services/horizon/internal/flags.go
@@ -57,6 +57,8 @@ const (
EnableIngestionFilteringFlagName = "exp-enable-ingestion-filtering"
// DisableTxSubFlagName is the command line flag for disabling transaction submission feature of Horizon
DisableTxSubFlagName = "disable-tx-sub"
+ // SkipSorobanIngestionFlagName is the command line flag for disabling Soroban related ingestion processing
+ SkipSorobanIngestionFlagName = "disable-soroban-ingest"
// StellarPubnet is a constant representing the Stellar public network
StellarPubnet = "pubnet"
@@ -338,11 +340,7 @@ func Flags() (*Config, support.ConfigOptions) {
Hidden: true,
CustomSetValue: func(opt *support.ConfigOption) error {
if val := viper.GetString(opt.Name); val != "" {
- stdLog.Printf(
- "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. " +
- "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in " +
- "the future.",
- )
+ return fmt.Errorf("flag --stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion")
}
return nil
},
@@ -595,11 +593,15 @@ func Flags() (*Config, support.ConfigOptions) {
&support.ConfigOption{
Name: "cursor-name",
EnvVar: "CURSOR_NAME",
- ConfigKey: &config.CursorName,
OptType: types.String,
- FlagDefault: "HORIZON",
- Usage: "ingestor cursor used by horizon to ingest from stellar core. must be uppercase and unique for each horizon instance ingesting from that core instance.",
+ Hidden: true,
UsedInCommands: IngestionCommands,
+ CustomSetValue: func(opt *support.ConfigOption) error {
+ if val := viper.GetString(opt.Name); val != "" {
+ return fmt.Errorf("flag --cursor-name has been removed and no longer valid, must use captive core configuration for ingestion")
+ }
+ return nil
+ },
},
&support.ConfigOption{
Name: "history-retention-count",
@@ -619,11 +621,15 @@ func Flags() (*Config, support.ConfigOptions) {
},
&support.ConfigOption{
Name: "skip-cursor-update",
- ConfigKey: &config.SkipCursorUpdate,
- OptType: types.Bool,
- FlagDefault: false,
- Usage: "causes the ingester to skip reporting the last imported ledger state to stellar-core",
+ OptType: types.String,
+ Hidden: true,
UsedInCommands: IngestionCommands,
+ CustomSetValue: func(opt *support.ConfigOption) error {
+ if val := viper.GetString(opt.Name); val != "" {
+ return fmt.Errorf("flag --skip-cursor-update has been removed and no longer valid, must use captive core configuration for ingestion")
+ }
+ return nil
+ },
},
&support.ConfigOption{
Name: "ingest-disable-state-verification",
@@ -726,6 +732,15 @@ func Flags() (*Config, support.ConfigOptions) {
HistoryArchiveURLsFlagName, CaptiveCoreConfigPathName),
UsedInCommands: IngestionCommands,
},
+ &support.ConfigOption{
+ Name: SkipSorobanIngestionFlagName,
+ ConfigKey: &config.SkipSorobanIngestion,
+ OptType: types.Bool,
+ FlagDefault: false,
+ Required: false,
+ Usage: "excludes Soroban data during ingestion processing",
+ UsedInCommands: IngestionCommands,
+ },
}
return config, flags
diff --git a/services/horizon/internal/flags_test.go b/services/horizon/internal/flags_test.go
index b2e617bc00..ef2d5d3a02 100644
--- a/services/horizon/internal/flags_test.go
+++ b/services/horizon/internal/flags_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/spf13/cobra"
+
"github.com/stellar/go/services/horizon/internal/test"
"github.com/stretchr/testify/assert"
@@ -259,3 +260,72 @@ func TestEnvironmentVariables(t *testing.T) {
assert.Equal(t, config.CaptiveCoreConfigPath, "../docker/captive-core-classic-integration-tests.cfg")
assert.Equal(t, config.CaptiveCoreConfigUseDB, true)
}
+
+func TestRemovedFlags(t *testing.T) {
+ tests := []struct {
+ name string
+ environmentVars map[string]string
+ errStr string
+ cmdArgs []string
+ }{
+ {
+ name: "STELLAR_CORE_DATABASE_URL removed",
+ environmentVars: map[string]string{
+ "INGEST": "false",
+ "STELLAR_CORE_DATABASE_URL": "coredb",
+ "DATABASE_URL": "dburl",
+ },
+ errStr: "flag --stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion",
+ },
+ {
+ name: "--stellar-core-db-url removed",
+ environmentVars: map[string]string{
+ "INGEST": "false",
+ "DATABASE_URL": "dburl",
+ },
+ errStr: "flag --stellar-core-db-url and environment variable STELLAR_CORE_DATABASE_URL have been removed and no longer valid, must use captive core configuration for ingestion",
+ cmdArgs: []string{"--stellar-core-db-url=coredb"},
+ },
+ {
+ name: "CURSOR_NAME removed",
+ environmentVars: map[string]string{
+ "INGEST": "false",
+ "CURSOR_NAME": "cursor",
+ "DATABASE_URL": "dburl",
+ },
+ errStr: "flag --cursor-name has been removed and no longer valid, must use captive core configuration for ingestion",
+ },
+ {
+ name: "SKIP_CURSOR_UPDATE removed",
+ environmentVars: map[string]string{
+ "INGEST": "false",
+ "SKIP_CURSOR_UPDATE": "true",
+ "DATABASE_URL": "dburl",
+ },
+ errStr: "flag --skip-cursor-update has been removed and no longer valid, must use captive core configuration for ingestion",
+ },
+ }
+
+ envManager := test.NewEnvironmentManager()
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ defer func() {
+ envManager.Restore()
+ }()
+ err := envManager.InitializeEnvironmentVariables(tt.environmentVars)
+ require.NoError(t, err)
+
+ config, flags := Flags()
+ testCmd := &cobra.Command{
+ Use: "test",
+ }
+
+ require.NoError(t, flags.Init(testCmd))
+ require.NoError(t, testCmd.ParseFlags(tt.cmdArgs))
+
+ err = ApplyFlags(config, flags, ApplyOptions{})
+ require.Error(t, err)
+ assert.Equal(t, tt.errStr, err.Error())
+ })
+ }
+}
diff --git a/services/horizon/internal/ingest/build_state_test.go b/services/horizon/internal/ingest/build_state_test.go
index 7e03818795..d1409182d9 100644
--- a/services/horizon/internal/ingest/build_state_test.go
+++ b/services/horizon/internal/ingest/build_state_test.go
@@ -10,7 +10,6 @@ import (
"github.com/stellar/go/ingest/ledgerbackend"
"github.com/stellar/go/support/errors"
"github.com/stellar/go/xdr"
- "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
)
@@ -83,12 +82,6 @@ func (s *BuildStateTestSuite) mockCommonHistoryQ() {
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once()
s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once()
s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(62),
- ).Return(nil).Once()
}
func (s *BuildStateTestSuite) TestCheckPointLedgerIsZero() {
@@ -175,12 +168,6 @@ func (s *BuildStateTestSuite) TestUpdateLastLedgerIngestReturnsError() {
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once()
s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once()
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(errors.New("my error")).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(62),
- ).Return(nil).Once()
next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system)
@@ -194,12 +181,6 @@ func (s *BuildStateTestSuite) TestUpdateExpStateInvalidReturnsError() {
s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once()
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once()
s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(errors.New("my error")).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(62),
- ).Return(nil).Once()
next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system)
@@ -215,13 +196,6 @@ func (s *BuildStateTestSuite) TestTruncateIngestStateTablesReturnsError() {
s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once()
s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(errors.New("my error")).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(62),
- ).Return(nil).Once()
-
next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system)
s.Assert().Error(err)
@@ -251,12 +225,6 @@ func (s *BuildStateTestSuite) TestRunHistoryArchiveIngestionGenesisReturnsError(
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(0)).Return(nil).Once()
s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once()
s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(0),
- ).Return(nil).Once()
s.runner.
On("RunGenesisStateIngestion").
diff --git a/services/horizon/internal/ingest/db_integration_test.go b/services/horizon/internal/ingest/db_integration_test.go
index 86576db137..60a45f158e 100644
--- a/services/horizon/internal/ingest/db_integration_test.go
+++ b/services/horizon/internal/ingest/db_integration_test.go
@@ -81,7 +81,6 @@ func (s *DBTestSuite) SetupTest() {
s.historyAdapter = &mockHistoryArchiveAdapter{}
var err error
sIface, err := NewSystem(Config{
- CoreSession: s.tt.CoreSession(),
HistorySession: s.tt.HorizonSession(),
HistoryArchiveURLs: []string{"http://ignore.test"},
DisableStateVerification: false,
diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go
index f5b4f94456..e0c667b033 100644
--- a/services/horizon/internal/ingest/fsm.go
+++ b/services/horizon/internal/ingest/fsm.go
@@ -8,6 +8,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
+ "github.com/stellar/go/historyarchive"
"github.com/stellar/go/ingest"
"github.com/stellar/go/ingest/ledgerbackend"
"github.com/stellar/go/support/errors"
@@ -326,11 +327,6 @@ func (b buildState) run(s *system) (transition, error) {
return nextFailState, nil
}
- if err = s.updateCursor(b.checkpointLedger - 1); err != nil {
- // Don't return updateCursor error.
- log.WithError(err).Warn("error updating stellar-core cursor")
- }
-
log.Info("Starting ingestion system from empty state...")
// Clear last_ingested_ledger in key value store
@@ -454,14 +450,6 @@ func (r resumeState) run(s *system) (transition, error) {
WithField("lastIngestedLedger", lastIngestedLedger).
Info("bumping ingest ledger to next ledger after ingested ledger in db")
- // Update cursor if there's more than one ingesting instance: either
- // Captive-Core or DB ingestion connected to another Stellar-Core.
- // remove now?
- if err = s.updateCursor(lastIngestedLedger); err != nil {
- // Don't return updateCursor error.
- log.WithError(err).Warn("error updating stellar-core cursor")
- }
-
// resume immediately so Captive-Core catchup is not slowed down
return resumeImmediately(lastIngestedLedger), nil
}
@@ -511,7 +499,7 @@ func (r resumeState) run(s *system) (transition, error) {
}
rebuildStart := time.Now()
- err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, ingestLedger, ingestLedger, s.config.RoundingSlippageFilter)
+ err = s.RebuildTradeAggregationBuckets(ingestLedger, ingestLedger)
if err != nil {
return retryResume(r), errors.Wrap(err, "error rebuilding trade aggregations")
}
@@ -522,12 +510,6 @@ func (r resumeState) run(s *system) (transition, error) {
return retryResume(r), err
}
- //TODO remove now? stellar-core-db-url is removed
- if err = s.updateCursor(ingestLedger); err != nil {
- // Don't return updateCursor error.
- log.WithError(err).Warn("error updating stellar-core cursor")
- }
-
duration = time.Since(startTime).Seconds()
s.Metrics().LedgerIngestionDuration.Observe(float64(duration))
@@ -542,6 +524,13 @@ func (r resumeState) run(s *system) (transition, error) {
r.addLedgerStatsMetricFromMap(s, "trades", tradeStatsMap)
r.addProcessorDurationsMetricFromMap(s, stats.transactionDurations)
+ // Since a single system instance is shared across all states, this
+ // sweeps up increments to the history archive counters made elsewhere,
+ // such as in verifyState invocations: the same system instance is passed
+ // there, so any additional archive usage rolls up and gets reported here
+ // as part of the resumeState transition.
+
+
localLog := log.WithFields(logpkg.F{
"sequence": ingestLedger,
"duration": duration,
@@ -584,6 +573,31 @@ func (r resumeState) addProcessorDurationsMetricFromMap(s *system, m map[string]
}
}
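+// addHistoryArchiveStatsMetrics publishes each archive's counters to the
+// HistoryArchiveStatsCounter vector, labeled by backend source and stat type.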
+func addHistoryArchiveStatsMetrics(s *system, stats []historyarchive.ArchiveStats) {
+ for _, historyServerStat := range stats {
+ s.Metrics().HistoryArchiveStatsCounter.
+ With(prometheus.Labels{
+ "source": historyServerStat.GetBackendName(),
+ "type": "file_downloads"}).
+ Add(float64(historyServerStat.GetDownloads()))
+ s.Metrics().HistoryArchiveStatsCounter.
+ With(prometheus.Labels{
+ "source": historyServerStat.GetBackendName(),
+ "type": "file_uploads"}).
+ Add(float64(historyServerStat.GetUploads()))
+ s.Metrics().HistoryArchiveStatsCounter.
+ With(prometheus.Labels{
+ "source": historyServerStat.GetBackendName(),
+ "type": "requests"}).
+ Add(float64(historyServerStat.GetRequests()))
+ s.Metrics().HistoryArchiveStatsCounter.
+ With(prometheus.Labels{
+ "source": historyServerStat.GetBackendName(),
+ "type": "cache_hits"}).
+ Add(float64(historyServerStat.GetCacheHits()))
+ }
+}
+
type waitForCheckpointState struct{}
func (waitForCheckpointState) String() string {
@@ -732,7 +746,7 @@ func (v verifyRangeState) run(s *system) (transition, error) {
Info("Processed ledger")
}
- err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, v.fromLedger, v.toLedger, s.config.RoundingSlippageFilter)
+ err = s.RebuildTradeAggregationBuckets(v.fromLedger, v.toLedger)
if err != nil {
return stop(), errors.Wrap(err, "error rebuilding trade aggregations")
}
diff --git a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
index 4e60f71cd1..832898d021 100644
--- a/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
+++ b/services/horizon/internal/ingest/fsm_reingest_history_range_state.go
@@ -124,13 +124,14 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) {
h.fromLedger = 2
}
- startTime := time.Now()
+ var startTime time.Time
if h.force {
if t, err := h.prepareRange(s); err != nil {
return t, err
}
+ startTime = time.Now()
if err := s.historyQ.Begin(s.ctx); err != nil {
return stop(), errors.Wrap(err, "Error starting a transaction")
}
@@ -167,6 +168,7 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) {
return t, err
}
+ startTime = time.Now()
if err := s.historyQ.Begin(s.ctx); err != nil {
return stop(), errors.Wrap(err, "Error starting a transaction")
}
@@ -181,11 +183,6 @@ func (h reingestHistoryRangeState) run(s *system) (transition, error) {
}
}
- err := s.historyQ.RebuildTradeAggregationBuckets(s.ctx, h.fromLedger, h.toLedger, s.config.RoundingSlippageFilter)
- if err != nil {
- return stop(), errors.Wrap(err, "Error rebuilding trade aggregations")
- }
-
log.WithFields(logpkg.F{
"from": h.fromLedger,
"to": h.toLedger,
diff --git a/services/horizon/internal/ingest/history_archive_adapter.go b/services/horizon/internal/ingest/history_archive_adapter.go
index d4cde9436f..7e415787e3 100644
--- a/services/horizon/internal/ingest/history_archive_adapter.go
+++ b/services/horizon/internal/ingest/history_archive_adapter.go
@@ -18,6 +18,7 @@ type historyArchiveAdapterInterface interface {
GetLatestLedgerSequence() (uint32, error)
BucketListHash(sequence uint32) (xdr.Hash, error)
GetState(ctx context.Context, sequence uint32) (ingest.ChangeReader, error)
+ GetStats() []historyarchive.ArchiveStats
}
// newHistoryArchiveAdapter is a constructor to make a historyArchiveAdapter
@@ -71,3 +72,7 @@ func (haa *historyArchiveAdapter) GetState(ctx context.Context, sequence uint32)
return sr, nil
}
+
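+// GetStats forwards the usage counters of the adapter's underlying archive.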
+func (haa *historyArchiveAdapter) GetStats() []historyarchive.ArchiveStats {
+ return haa.archive.GetStats()
+}
diff --git a/services/horizon/internal/ingest/history_archive_adapter_test.go b/services/horizon/internal/ingest/history_archive_adapter_test.go
index 7c9207cbe4..20d84149fa 100644
--- a/services/horizon/internal/ingest/history_archive_adapter_test.go
+++ b/services/horizon/internal/ingest/history_archive_adapter_test.go
@@ -33,6 +33,11 @@ func (m *mockHistoryArchiveAdapter) GetState(ctx context.Context, sequence uint3
return args.Get(0).(ingest.ChangeReader), args.Error(1)
}
+func (m *mockHistoryArchiveAdapter) GetStats() []historyarchive.ArchiveStats {
+ a := m.Called()
+ return a.Get(0).([]historyarchive.ArchiveStats)
+}
+
func TestGetState_Read(t *testing.T) {
archive, e := getTestArchive()
if !assert.NoError(t, e) {
diff --git a/services/horizon/internal/ingest/ingest_history_range_state_test.go b/services/horizon/internal/ingest/ingest_history_range_state_test.go
index 4598008eb8..4f7d2c4944 100644
--- a/services/horizon/internal/ingest/ingest_history_range_state_test.go
+++ b/services/horizon/internal/ingest/ingest_history_range_state_test.go
@@ -304,16 +304,16 @@ func (s *ReingestHistoryRangeStateTestSuite) TearDownTest() {
func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateInvalidRange() {
// Recreate mock in this single test to remove Rollback assertion.
s.historyQ = &mockDBQ{}
- err := s.system.ReingestRange([]history.LedgerRange{{0, 0}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{0, 0}}, false, true)
s.Assert().EqualError(err, "Invalid range: {0 0} genesis ledger starts at 1")
- err = s.system.ReingestRange([]history.LedgerRange{{0, 100}}, false)
+ err = s.system.ReingestRange([]history.LedgerRange{{0, 100}}, false, true)
s.Assert().EqualError(err, "Invalid range: {0 100} genesis ledger starts at 1")
- err = s.system.ReingestRange([]history.LedgerRange{{100, 0}}, false)
+ err = s.system.ReingestRange([]history.LedgerRange{{100, 0}}, false, true)
s.Assert().EqualError(err, "Invalid range: {100 0} from > to")
- err = s.system.ReingestRange([]history.LedgerRange{{100, 99}}, false)
+ err = s.system.ReingestRange([]history.LedgerRange{{100, 99}}, false, true)
s.Assert().EqualError(err, "Invalid range: {100 99} from > to")
}
@@ -323,7 +323,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateInvali
s.historyQ.On("Rollback").Return(nil).Once()
s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once()
s.system.maxLedgerPerFlush = 0
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "invalid maxLedgerPerFlush, must be greater than 0")
}
@@ -332,28 +332,28 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateBeginR
s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once()
s.historyQ.On("Begin", s.ctx).Return(errors.New("my error")).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "Error starting a transaction: my error")
}
func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateGetLastLedgerIngestNonBlockingError() {
s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), errors.New("my error")).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "Error getting last ingested ledger: my error")
}
func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateRangeOverlaps() {
s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(190), nil).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().Equal(ErrReingestRangeConflict{190}, err)
}
func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStatRangeOverlapsAtEnd() {
s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(200), nil).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().Equal(ErrReingestRangeConflict{200}, err)
}
@@ -369,7 +369,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateClearH
"DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(),
).Return(errors.New("my error")).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "error in DeleteRangeAll: my error")
}
@@ -397,7 +397,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateRunTra
s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once()
s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(errors.New("my error")).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "error processing ledger range 100 - 100: my error")
}
@@ -428,7 +428,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateCommit
s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(nil).Once()
}
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().EqualError(err, "Error committing db transaction: my error")
}
@@ -460,7 +460,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces
}
// system.maxLedgerPerFlush has been set by default to 1 in test suite setup
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().NoError(err)
}
@@ -500,7 +500,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces
s.runner.On("RunTransactionProcessorsOnLedgers", firstLedgersBatch).Return(nil).Once()
s.runner.On("RunTransactionProcessorsOnLedgers", secondLedgersBatch).Return(nil).Once()
s.system.maxLedgerPerFlush = 60
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false, true)
s.Assert().NoError(err)
}
@@ -534,7 +534,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateSucces
s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once()
s.runner.On("RunTransactionProcessorsOnLedgers", []xdr.LedgerCloseMeta{meta}).Return(nil).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 100}}, false)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 100}}, false, true)
s.Assert().NoError(err)
}
@@ -543,7 +543,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceG
s.historyQ.On("Rollback").Return(nil).Once()
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once()
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true)
s.Assert().EqualError(err, "Error getting last ingested ledger: my error")
}
@@ -576,7 +576,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForce(
}
// system.maxLedgerPerFlush has been set by default to 1 in test suite setup
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true)
s.Assert().NoError(err)
}
@@ -610,7 +610,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceL
s.ledgerBackend.On("GetLedger", s.ctx, uint32(106)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once()
// system.maxLedgerPerFlush has been set by default to 1 in test suite setup
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true)
s.Assert().EqualError(err, "error getting ledger: my error")
}
@@ -644,7 +644,7 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceL
s.ledgerBackend.On("GetLedger", s.ctx, uint32(106)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once()
// system.maxLedgerPerFlush has been set by default to 1 in test suite setup
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true)
s.Assert().EqualError(err, "Error committing db transaction: error getting ledger: my error")
}
@@ -686,6 +686,6 @@ func (s *ReingestHistoryRangeStateTestSuite) TestReingestHistoryRangeStateForceW
s.runner.On("RunTransactionProcessorsOnLedgers", secondLedgersBatch).Return(nil).Once()
s.system.maxLedgerPerFlush = 60
- err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true)
+ err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true, true)
s.Assert().NoError(err)
}
diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go
index f6c9e23f9f..7d9596db94 100644
--- a/services/horizon/internal/ingest/main.go
+++ b/services/horizon/internal/ingest/main.go
@@ -6,6 +6,7 @@ package ingest
import (
"context"
"fmt"
+ "path"
"runtime"
"sync"
"time"
@@ -80,14 +81,11 @@ const (
var log = logpkg.DefaultLogger.WithField("service", "ingest")
type Config struct {
- CoreSession db.SessionInterface
StellarCoreURL string
- StellarCoreCursor string
CaptiveCoreBinaryPath string
CaptiveCoreStoragePath string
CaptiveCoreToml *ledgerbackend.CaptiveCoreToml
CaptiveCoreConfigUseDB bool
- RemoteCaptiveCoreURL string
NetworkPassphrase string
HistorySession db.SessionInterface
@@ -110,19 +108,8 @@ type Config struct {
EnableIngestionFiltering bool
MaxLedgerPerFlush uint32
-}
-
-// LocalCaptiveCoreEnabled returns true if configured to run
-// a local captive core instance for ingestion.
-func (c Config) LocalCaptiveCoreEnabled() bool {
- // c.RemoteCaptiveCoreURL is always empty when running local captive core.
- return c.RemoteCaptiveCoreURL == ""
-}
-// RemoteCaptiveCoreEnabled returns true if configured to run
-// a remote captive core instance for ingestion.
-func (c Config) RemoteCaptiveCoreEnabled() bool {
- return c.RemoteCaptiveCoreURL != ""
+ SkipSorobanIngestion bool
}
const (
@@ -178,6 +165,9 @@ type Metrics struct {
// ProcessorsRunDurationSummary exposes processors run durations.
ProcessorsRunDurationSummary *prometheus.SummaryVec
+
+ // HistoryArchiveStatsCounter counts history archive requests, downloads, uploads, and cache hits, labeled by archive source
+ HistoryArchiveStatsCounter *prometheus.CounterVec
}
type System interface {
@@ -187,10 +177,11 @@ type System interface {
StressTest(numTransactions, changesPerTransaction int) error
VerifyRange(fromLedger, toLedger uint32, verifyState bool) error
BuildState(sequence uint32, skipChecks bool) error
- ReingestRange(ledgerRanges []history.LedgerRange, force bool) error
+ ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error
BuildGenesisState() error
Shutdown()
GetCurrentState() State
+ RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error
}
type system struct {
@@ -239,7 +230,12 @@ func NewSystem(config Config) (System, error) {
CheckpointFrequency: config.CheckpointFrequency,
ConnectOptions: storage.ConnectOptions{
Context: ctx,
- UserAgent: fmt.Sprintf("horizon/%s golang/%s", apkg.Version(), runtime.Version()),
+ UserAgent: fmt.Sprintf("horizon/%s golang/%s", apkg.Version(), runtime.Version())},
+ CacheConfig: historyarchive.CacheOptions{
+ Cache: true,
+ Path: path.Join(config.CaptiveCoreStoragePath, "bucket-cache"),
+ Log: log.WithField("subservice", "ha-cache"),
+ MaxFiles: 150,
},
},
)
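For context, a minimal standalone sketch of the on-disk bucket cache wiring introduced above, assuming the package's Connect entry point and a hypothetical archive URL and cache path:

	// Sketch (assumed imports: context, github.com/stellar/go/historyarchive,
	// github.com/stellar/go/support/storage).
	func connectWithBucketCache(ctx context.Context) (*historyarchive.Archive, error) {
		return historyarchive.Connect(
			"https://history.stellar.org/prd/core-live/core_live_001",
			historyarchive.ArchiveOptions{
				CheckpointFrequency: 64,
				ConnectOptions:      storage.ConnectOptions{Context: ctx},
				CacheConfig: historyarchive.CacheOptions{
					Cache:    true,
					Path:     "/var/horizon/bucket-cache", // hypothetical location
					MaxFiles: 150,
				},
			},
		)
	}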
@@ -248,41 +244,26 @@ func NewSystem(config Config) (System, error) {
return nil, errors.Wrap(err, "error creating history archive")
}
- var ledgerBackend ledgerbackend.LedgerBackend
- if config.RemoteCaptiveCoreEnabled() {
- ledgerBackend, err = ledgerbackend.NewRemoteCaptive(config.RemoteCaptiveCoreURL)
- if err != nil {
- cancel()
- return nil, errors.Wrap(err, "error creating captive core backend")
- }
- } else if config.LocalCaptiveCoreEnabled() {
- logger := log.WithField("subservice", "stellar-core")
- ledgerBackend, err = ledgerbackend.NewCaptive(
- ledgerbackend.CaptiveCoreConfig{
- BinaryPath: config.CaptiveCoreBinaryPath,
- StoragePath: config.CaptiveCoreStoragePath,
- UseDB: config.CaptiveCoreConfigUseDB,
- Toml: config.CaptiveCoreToml,
- NetworkPassphrase: config.NetworkPassphrase,
- HistoryArchiveURLs: config.HistoryArchiveURLs,
- CheckpointFrequency: config.CheckpointFrequency,
- LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession),
- Log: logger,
- Context: ctx,
- UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()),
- },
- )
- if err != nil {
- cancel()
- return nil, errors.Wrap(err, "error creating captive core backend")
- }
- } else {
- coreSession := config.CoreSession.Clone()
- ledgerBackend, err = ledgerbackend.NewDatabaseBackendFromSession(coreSession, config.NetworkPassphrase)
- if err != nil {
- cancel()
- return nil, errors.Wrap(err, "error creating ledger backend")
- }
+ // local captive core is now the only supported ledger backend for ingestion
+ logger := log.WithField("subservice", "stellar-core")
+ ledgerBackend, err := ledgerbackend.NewCaptive(
+ ledgerbackend.CaptiveCoreConfig{
+ BinaryPath: config.CaptiveCoreBinaryPath,
+ StoragePath: config.CaptiveCoreStoragePath,
+ UseDB: config.CaptiveCoreConfigUseDB,
+ Toml: config.CaptiveCoreToml,
+ NetworkPassphrase: config.NetworkPassphrase,
+ HistoryArchiveURLs: config.HistoryArchiveURLs,
+ CheckpointFrequency: config.CheckpointFrequency,
+ LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession),
+ Log: logger,
+ Context: ctx,
+ UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()),
+ },
+ )
+ if err != nil {
+ cancel()
+ return nil, errors.Wrap(err, "error creating captive core backend")
}
historyQ := &history.Q{config.HistorySession.Clone()}
@@ -424,6 +405,14 @@ func (s *system) initMetrics() {
},
[]string{"name"},
)
+
+ s.metrics.HistoryArchiveStatsCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "horizon", Subsystem: "ingest", Name: "history_archive_stats_total",
+ Help: "history archive activity (requests, downloads, uploads, cache hits), labeled by source and type",
+ },
+ []string{"source", "type"},
+ )
}
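A sketch of how these labeled counters could be fed from the archive stats, assuming the getters shown in the MockArchiveStats expectations later in this patch (GetBackendName, GetRequests, GetDownloads, GetUploads, GetCacheHits); the helper name is hypothetical and it assumes the supplied values are deltas since the last report:

	func (s *system) addHistoryArchiveStatsMetrics(stats []historyarchive.ArchiveStats) {
		for _, as := range stats {
			src := as.GetBackendName()
			for kind, delta := range map[string]uint32{
				"requests":   as.GetRequests(),
				"downloads":  as.GetDownloads(),
				"uploads":    as.GetUploads(),
				"cache-hits": as.GetCacheHits(),
			} {
				// counters must only increase, hence deltas rather than running totals
				s.metrics.HistoryArchiveStatsCounter.
					With(prometheus.Labels{"source": src, "type": kind}).
					Add(float64(delta))
			}
		}
	}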
func (s *system) GetCurrentState() State {
@@ -449,6 +438,7 @@ func (s *system) RegisterMetrics(registry *prometheus.Registry) {
registry.MustRegister(s.metrics.ProcessorsRunDuration)
registry.MustRegister(s.metrics.ProcessorsRunDurationSummary)
registry.MustRegister(s.metrics.StateVerifyLedgerEntriesCount)
+ registry.MustRegister(s.metrics.HistoryArchiveStatsCounter)
s.ledgerBackend = ledgerbackend.WithMetrics(s.ledgerBackend, registry, "horizon")
}
@@ -543,7 +533,7 @@ func validateRanges(ledgerRanges []history.LedgerRange) error {
// ReingestRange runs the ingestion pipeline on the range of ledgers ingesting
// history data only.
-func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool) error {
+func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error {
if err := validateRanges(ledgerRanges); err != nil {
return err
}
@@ -564,10 +554,20 @@ func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool) e
if err != nil {
return err
}
+ if rebuildTradeAgg {
+ err = s.RebuildTradeAggregationBuckets(cur.StartSequence, cur.EndSequence)
+ if err != nil {
+ return errors.Wrap(err, "Error rebuilding trade aggregations")
+ }
+ }
}
return nil
}
+func (s *system) RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error {
+ return s.historyQ.RebuildTradeAggregationBuckets(s.ctx, fromLedger, toLedger, s.config.RoundingSlippageFilter)
+}
+
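Callers now opt into the trade aggregation rebuild explicitly through the third argument; a minimal usage sketch, with hypothetical range bounds and sys standing for an ingest.System:

	ranges := []history.LedgerRange{{StartSequence: 100, EndSequence: 200}}
	if err := sys.ReingestRange(ranges, false /* force */, true /* rebuildTradeAgg */); err != nil {
		log.WithError(err).Error("reingestion failed")
	}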
// BuildGenesisState runs the ingestion pipeline on genesis ledger. Transitions
// to stopState when done.
func (s *system) BuildGenesisState() error {
@@ -755,26 +755,6 @@ func (s *system) resetStateVerificationErrors() {
s.stateVerificationErrors = 0
}
-func (s *system) updateCursor(ledgerSequence uint32) error {
- if s.stellarCoreClient == nil {
- return nil
- }
-
- cursor := defaultCoreCursorName
- if s.config.StellarCoreCursor != "" {
- cursor = s.config.StellarCoreCursor
- }
-
- ctx, cancel := context.WithTimeout(s.ctx, time.Second)
- defer cancel()
- err := s.stellarCoreClient.SetCursor(ctx, cursor, int32(ledgerSequence))
- if err != nil {
- return errors.Wrap(err, "Setting stellar-core cursor failed")
- }
-
- return nil
-}
-
func (s *system) Shutdown() {
log.Info("Shutting down ingestion system...")
s.stateVerificationMutex.Lock()
diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go
index 55860eeaff..80b5a40ed1 100644
--- a/services/horizon/internal/ingest/main_test.go
+++ b/services/horizon/internal/ingest/main_test.go
@@ -90,7 +90,6 @@ func TestLedgerEligibleForStateVerification(t *testing.T) {
func TestNewSystem(t *testing.T) {
config := Config{
- CoreSession: &db.Session{DB: &sqlx.DB{}},
HistorySession: &db.Session{DB: &sqlx.DB{}},
DisableStateVerification: true,
HistoryArchiveURLs: []string{"https://history.stellar.org/prd/core-live/core_live_001"},
@@ -593,8 +592,8 @@ func (m *mockSystem) BuildState(sequence uint32, skipChecks bool) error {
return args.Error(0)
}
-func (m *mockSystem) ReingestRange(ledgerRanges []history.LedgerRange, force bool) error {
- args := m.Called(ledgerRanges, force)
+func (m *mockSystem) ReingestRange(ledgerRanges []history.LedgerRange, force bool, rebuildTradeAgg bool) error {
+ args := m.Called(ledgerRanges, force, rebuildTradeAgg)
return args.Error(0)
}
@@ -608,6 +607,11 @@ func (m *mockSystem) GetCurrentState() State {
return args.Get(0).(State)
}
+func (m *mockSystem) RebuildTradeAggregationBuckets(fromLedger, toLedger uint32) error {
+ args := m.Called(fromLedger, toLedger)
+ return args.Error(0)
+}
+
func (m *mockSystem) Shutdown() {
m.Called()
}
diff --git a/services/horizon/internal/ingest/parallel.go b/services/horizon/internal/ingest/parallel.go
index b3c163689d..4f07c21cc4 100644
--- a/services/horizon/internal/ingest/parallel.go
+++ b/services/horizon/internal/ingest/parallel.go
@@ -2,6 +2,7 @@ package ingest
import (
"fmt"
+ "math"
"sync"
"github.com/stellar/go/services/horizon/internal/db2/history"
@@ -52,9 +53,6 @@ func (ps *ParallelSystems) Shutdown() {
if ps.config.HistorySession != nil {
ps.config.HistorySession.Close()
}
- if ps.config.CoreSession != nil {
- ps.config.CoreSession.Close()
- }
}
func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, reingestJobQueue <-chan history.LedgerRange) rangeError {
@@ -64,7 +62,7 @@ func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, rei
case <-stop:
return rangeError{}
case reingestRange := <-reingestJobQueue:
- err := s.ReingestRange([]history.LedgerRange{reingestRange}, false)
+ err := s.ReingestRange([]history.LedgerRange{reingestRange}, false, false)
if err != nil {
return rangeError{
err: err,
@@ -76,7 +74,24 @@ func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, rei
}
}
-func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, stop <-chan struct{}, reingestJobQueue chan<- history.LedgerRange) {
+func (ps *ParallelSystems) rebuildTradeAggRanges(ledgerRanges []history.LedgerRange) error {
+ s, err := ps.systemFactory(ps.config)
+ if err != nil {
+ return err
+ }
+
+ for _, cur := range ledgerRanges {
+ err := s.RebuildTradeAggregationBuckets(cur.StartSequence, cur.EndSequence)
+ if err != nil {
+ return errors.Wrapf(err, "Error rebuilding trade aggregations for range start=%v, stop=%v", cur.StartSequence, cur.EndSequence)
+ }
+ }
+ return nil
+}
+
+// enqueueReingestTasks queues batched subranges for reingestion and returns the lowest start ledger across all ledgerRanges
+func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, stop <-chan struct{}, reingestJobQueue chan<- history.LedgerRange) uint32 {
+ lowestLedger := uint32(math.MaxUint32)
for _, cur := range ledgerRanges {
for subRangeFrom := cur.StartSequence; subRangeFrom < cur.EndSequence; {
// job queuing
@@ -86,12 +101,16 @@ func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32,
}
select {
case <-stop:
- return
+ return lowestLedger
case reingestJobQueue <- history.LedgerRange{StartSequence: subRangeFrom, EndSequence: subRangeTo}:
}
+ if subRangeFrom < lowestLedger {
+ lowestLedger = subRangeFrom
+ }
subRangeFrom = subRangeTo + 1
}
}
+ return lowestLedger
}
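To make the batching concrete, a sketch of the subrange arithmetic; the cap logic for subRangeTo is assumed here, since the lines computing it are unchanged and elided from this hunk:

	// splitRange mirrors the enqueue loop: fixed-size batches, with the last
	// batch clamped to the range end.
	func splitRange(r history.LedgerRange, batchSize uint32) []history.LedgerRange {
		var out []history.LedgerRange
		for from := r.StartSequence; from < r.EndSequence; {
			to := from + batchSize - 1
			if to > r.EndSequence {
				to = r.EndSequence
			}
			out = append(out, history.LedgerRange{StartSequence: from, EndSequence: to})
			from = to + 1
		}
		return out
	}

	// splitRange(history.LedgerRange{StartSequence: 1, EndSequence: 1024}, 64)
	// yields {1, 64}, {65, 128}, ..., {961, 1024}; the lowest start ledger,
	// and hence the return value of enqueueReingestTasks, is 1.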
func calculateParallelLedgerBatchSize(rangeSize uint32, batchSizeSuggestion uint32, workerCount uint) uint32 {
@@ -169,7 +188,7 @@ func (ps *ParallelSystems) ReingestRange(ledgerRanges []history.LedgerRange, bat
}()
}
- enqueueReingestTasks(ledgerRanges, batchSize, stop, reingestJobQueue)
+ lowestLedger := enqueueReingestTasks(ledgerRanges, batchSize, stop, reingestJobQueue)
stopOnce.Do(func() {
close(stop)
@@ -179,7 +198,13 @@ func (ps *ParallelSystems) ReingestRange(ledgerRanges []history.LedgerRange, bat
if lowestRangeErr != nil {
lastLedger := ledgerRanges[len(ledgerRanges)-1].EndSequence
+ if err := ps.rebuildTradeAggRanges([]history.LedgerRange{{StartSequence: lowestLedger, EndSequence: lowestRangeErr.ledgerRange.StartSequence}}); err != nil {
+ log.WithError(err).Errorf("error when trying to rebuild trade agg for partially completed portion of overall parallel reingestion range, start=%v, stop=%v", lowestLedger, lowestRangeErr.ledgerRange.StartSequence)
+ }
return errors.Wrapf(lowestRangeErr, "job failed, recommended restart range: [%d, %d]", lowestRangeErr.ledgerRange.StartSequence, lastLedger)
}
+ if err := ps.rebuildTradeAggRanges(ledgerRanges); err != nil {
+ return err
+ }
return nil
}
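A small illustration of the bounds used on the failure path, matching the mock expectations in parallel_test.go below: with the lowest enqueued ledger at 1 and a failure in the subrange starting at 1537, trade aggregations are rebuilt only for the completed portion:

	// rebuildBoundsOnFailure is hypothetical; it just names the arithmetic above.
	func rebuildBoundsOnFailure(lowestLedger, failedStart uint32) history.LedgerRange {
		return history.LedgerRange{StartSequence: lowestLedger, EndSequence: failedStart}
	}

	// rebuildBoundsOnFailure(1, 1537) == history.LedgerRange{1, 1537}, after which
	// the wrapped error recommends restarting from [1537, lastLedger].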
diff --git a/services/horizon/internal/ingest/parallel_test.go b/services/horizon/internal/ingest/parallel_test.go
index 27ab0c459f..8004a4048c 100644
--- a/services/horizon/internal/ingest/parallel_test.go
+++ b/services/horizon/internal/ingest/parallel_test.go
@@ -31,7 +31,7 @@ func TestParallelReingestRange(t *testing.T) {
m sync.Mutex
)
result := &mockSystem{}
- result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Run(
+ result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Run(
func(args mock.Arguments) {
m.Lock()
defer m.Unlock()
@@ -39,6 +39,7 @@ func TestParallelReingestRange(t *testing.T) {
// simulate call
time.Sleep(time.Millisecond * time.Duration(10+rand.Int31n(50)))
}).Return(error(nil))
+ result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(2050)).Return(nil).Once()
factory := func(c Config) (System, error) {
return result, nil
}
@@ -59,6 +60,7 @@ func TestParallelReingestRange(t *testing.T) {
rangesCalled = nil
system, err = newParallelSystems(config, 1, factory)
assert.NoError(t, err)
+ result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1024)).Return(nil).Once()
err = system.ReingestRange([]history.LedgerRange{{1, 1024}}, 64)
result.AssertExpectations(t)
expected = []history.LedgerRange{
@@ -75,8 +77,10 @@ func TestParallelReingestRangeError(t *testing.T) {
config := Config{}
result := &mockSystem{}
// Fail on the second range
- result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Return(errors.New("failed because of foo"))
- result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil))
+ result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, false, false).Return(errors.New("failed because of foo")).Once()
+ result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Return(nil)
+ result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1537)).Return(nil).Once()
+
factory := func(c Config) (System, error) {
return result, nil
}
@@ -94,17 +98,18 @@ func TestParallelReingestRangeErrorInEarlierJob(t *testing.T) {
wg.Add(1)
result := &mockSystem{}
// Fail on a lower subrange after the first error
- result.On("ReingestRange", []history.LedgerRange{{1025, 1280}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) {
+ result.On("ReingestRange", []history.LedgerRange{{1025, 1280}}, false, false).Run(func(mock.Arguments) {
// Wait for a more recent range to error
wg.Wait()
// This sleep should help make sure the result of this range is processed later than the one below
// (there are no guarantees without instrumenting ReingestRange(), but that's too complicated)
time.Sleep(50 * time.Millisecond)
- }).Return(errors.New("failed because of foo"))
- result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) {
+ }).Return(errors.New("failed because of foo")).Once()
+ result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, false, false).Run(func(mock.Arguments) {
wg.Done()
- }).Return(errors.New("failed because of bar"))
- result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil))
+ }).Return(errors.New("failed because of bar")).Once()
+ result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), false, false).Return(error(nil))
+ result.On("RebuildTradeAggregationBuckets", uint32(1), uint32(1025)).Return(nil).Once()
factory := func(c Config) (System, error) {
return result, nil
diff --git a/services/horizon/internal/ingest/processor_runner.go b/services/horizon/internal/ingest/processor_runner.go
index ed066a20d2..a09442b49d 100644
--- a/services/horizon/internal/ingest/processor_runner.go
+++ b/services/horizon/internal/ingest/processor_runner.go
@@ -111,6 +111,7 @@ func buildChangeProcessor(
source ingestionSource,
ledgerSequence uint32,
networkPassphrase string,
+ skipSorobanIngestion bool,
) *groupChangeProcessors {
statsChangeProcessor := &statsChangeProcessor{
StatsChangeProcessor: changeStats,
@@ -144,13 +145,13 @@ func (s *ProcessorRunner) buildTransactionProcessor(ledgersProcessor *processors
processors := []horizonTransactionProcessor{
statsLedgerTransactionProcessor,
- processors.NewEffectProcessor(accountLoader, s.historyQ.NewEffectBatchInsertBuilder(), s.config.NetworkPassphrase),
+ processors.NewEffectProcessor(accountLoader, s.historyQ.NewEffectBatchInsertBuilder(), s.config.NetworkPassphrase, s.config.SkipSorobanIngestion),
ledgersProcessor,
- processors.NewOperationProcessor(s.historyQ.NewOperationBatchInsertBuilder(), s.config.NetworkPassphrase),
+ processors.NewOperationProcessor(s.historyQ.NewOperationBatchInsertBuilder(), s.config.NetworkPassphrase, s.config.SkipSorobanIngestion),
tradeProcessor,
processors.NewParticipantsProcessor(accountLoader,
s.historyQ.NewTransactionParticipantsBatchInsertBuilder(), s.historyQ.NewOperationParticipantBatchInsertBuilder()),
- processors.NewTransactionProcessor(s.historyQ.NewTransactionBatchInsertBuilder()),
+ processors.NewTransactionProcessor(s.historyQ.NewTransactionBatchInsertBuilder(), s.config.SkipSorobanIngestion),
processors.NewClaimableBalancesTransactionProcessor(cbLoader,
s.historyQ.NewTransactionClaimableBalanceBatchInsertBuilder(), s.historyQ.NewOperationClaimableBalanceBatchInsertBuilder()),
processors.NewLiquidityPoolsTransactionProcessor(lpLoader,
@@ -172,7 +173,10 @@ func (s *ProcessorRunner) buildFilteredOutProcessor() *groupTransactionProcessor
// when in online mode, the submission result processor must always run (regardless of filtering)
var p []horizonTransactionProcessor
if s.config.EnableIngestionFiltering {
- txSubProc := processors.NewTransactionFilteredTmpProcessor(s.historyQ.NewTransactionFilteredTmpBatchInsertBuilder())
+ txSubProc := processors.NewTransactionFilteredTmpProcessor(
+ s.historyQ.NewTransactionFilteredTmpBatchInsertBuilder(),
+ s.config.SkipSorobanIngestion,
+ )
p = append(p, txSubProc)
}
@@ -235,6 +239,7 @@ func (s *ProcessorRunner) RunHistoryArchiveIngestion(
historyArchiveSource,
checkpointLedger,
s.config.NetworkPassphrase,
+ s.config.SkipSorobanIngestion,
)
if checkpointLedger == 1 {
@@ -353,7 +358,7 @@ func (s *ProcessorRunner) streamLedger(ledger xdr.LedgerCloseMeta,
"ledger": true,
"commit": false,
"duration": time.Since(startTime).Seconds(),
- }).Info("Processed ledger")
+ }).Info("Transaction processors finished for ledger")
return nil
}
@@ -493,6 +498,7 @@ func (s *ProcessorRunner) RunAllProcessorsOnLedger(ledger xdr.LedgerCloseMeta) (
ledgerSource,
ledger.LedgerSequence(),
s.config.NetworkPassphrase,
+ s.config.SkipSorobanIngestion,
)
err = s.runChangeProcessorOnLedger(groupChangeProcessors, ledger)
if err != nil {
diff --git a/services/horizon/internal/ingest/processor_runner_test.go b/services/horizon/internal/ingest/processor_runner_test.go
index eaeca95661..ddac48aa82 100644
--- a/services/horizon/internal/ingest/processor_runner_test.go
+++ b/services/horizon/internal/ingest/processor_runner_test.go
@@ -180,7 +180,7 @@ func TestProcessorRunnerBuildChangeProcessor(t *testing.T) {
}
stats := &ingest.StatsChangeProcessor{}
- processor := buildChangeProcessor(runner.historyQ, stats, ledgerSource, 123, "")
+ processor := buildChangeProcessor(runner.historyQ, stats, ledgerSource, 123, "", false)
assert.IsType(t, &groupChangeProcessors{}, processor)
assert.IsType(t, &statsChangeProcessor{}, processor.processors[0])
@@ -201,7 +201,7 @@ func TestProcessorRunnerBuildChangeProcessor(t *testing.T) {
filters: &MockFilters{},
}
- processor = buildChangeProcessor(runner.historyQ, stats, historyArchiveSource, 456, "")
+ processor = buildChangeProcessor(runner.historyQ, stats, historyArchiveSource, 456, "", false)
assert.IsType(t, &groupChangeProcessors{}, processor)
assert.IsType(t, &statsChangeProcessor{}, processor.processors[0])
@@ -271,6 +271,7 @@ func TestProcessorRunnerWithFilterEnabled(t *testing.T) {
config := Config{
NetworkPassphrase: network.PublicNetworkPassphrase,
EnableIngestionFiltering: true,
+ SkipSorobanIngestion: false,
}
q := &mockDBQ{}
diff --git a/services/horizon/internal/ingest/processors/effects_processor.go b/services/horizon/internal/ingest/processors/effects_processor.go
index 34e9f9169a..830632f5f5 100644
--- a/services/horizon/internal/ingest/processors/effects_processor.go
+++ b/services/horizon/internal/ingest/processors/effects_processor.go
@@ -28,17 +28,20 @@ type EffectProcessor struct {
accountLoader *history.AccountLoader
batch history.EffectBatchInsertBuilder
network string
+ skipSoroban bool
}
func NewEffectProcessor(
accountLoader *history.AccountLoader,
batch history.EffectBatchInsertBuilder,
network string,
+ skipSoroban bool,
) *EffectProcessor {
return &EffectProcessor{
accountLoader: accountLoader,
batch: batch,
network: network,
+ skipSoroban: skipSoroban,
}
}
@@ -50,14 +53,29 @@ func (p *EffectProcessor) ProcessTransaction(
return nil
}
- for opi, op := range transaction.Envelope.Operations() {
+ elidedTransaction := transaction
+
+ if p.skipSoroban &&
+ elidedTransaction.UnsafeMeta.V == 3 &&
+ elidedTransaction.UnsafeMeta.V3.SorobanMeta != nil {
+ elidedTransaction.UnsafeMeta.V3 = &xdr.TransactionMetaV3{
+ Ext: xdr.ExtensionPoint{},
+ TxChangesBefore: xdr.LedgerEntryChanges{},
+ Operations: []xdr.OperationMeta{},
+ TxChangesAfter: xdr.LedgerEntryChanges{},
+ SorobanMeta: nil,
+ }
+ }
+
+ for opi, op := range elidedTransaction.Envelope.Operations() {
operation := transactionOperationWrapper{
index: uint32(opi),
- transaction: transaction,
+ transaction: elidedTransaction,
operation: op,
ledgerSequence: uint32(lcm.LedgerSequence()),
network: p.network,
}
+
if err := operation.ingestEffects(p.accountLoader, p.batch); err != nil {
return errors.Wrapf(err, "reading operation %v effects", operation.ID())
}
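The same meta-eliding pattern recurs in transactions_processor.go later in this patch; a shared helper could express it once. A minimal sketch with a hypothetical name; note the transaction is received by value, so only the copy's V3 pointer is replaced:

	func elideSorobanMeta(tx ingest.LedgerTransaction) ingest.LedgerTransaction {
		if tx.UnsafeMeta.V == 3 && tx.UnsafeMeta.V3 != nil && tx.UnsafeMeta.V3.SorobanMeta != nil {
			// replace the Soroban meta with an empty V3 envelope
			tx.UnsafeMeta.V3 = &xdr.TransactionMetaV3{
				Ext:             xdr.ExtensionPoint{},
				TxChangesBefore: xdr.LedgerEntryChanges{},
				Operations:      []xdr.OperationMeta{},
				TxChangesAfter:  xdr.LedgerEntryChanges{},
				SorobanMeta:     nil,
			}
		}
		return tx
	}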
diff --git a/services/horizon/internal/ingest/processors/effects_processor_test.go b/services/horizon/internal/ingest/processors/effects_processor_test.go
index 0243768fde..70af21737a 100644
--- a/services/horizon/internal/ingest/processors/effects_processor_test.go
+++ b/services/horizon/internal/ingest/processors/effects_processor_test.go
@@ -143,6 +143,7 @@ func (s *EffectsProcessorTestSuiteLedger) SetupTest() {
s.accountLoader,
s.mockBatchInsertBuilder,
networkPassphrase,
+ false,
)
s.txs = []ingest.LedgerTransaction{
diff --git a/services/horizon/internal/ingest/processors/operations_processor.go b/services/horizon/internal/ingest/processors/operations_processor.go
index 8ad023145c..92a4b870e9 100644
--- a/services/horizon/internal/ingest/processors/operations_processor.go
+++ b/services/horizon/internal/ingest/processors/operations_processor.go
@@ -22,14 +22,16 @@ import (
// OperationProcessor operations processor
type OperationProcessor struct {
- batch history.OperationBatchInsertBuilder
- network string
+ batch history.OperationBatchInsertBuilder
+ network string
+ skipSoroban bool
}
-func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network string) *OperationProcessor {
+func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network string, skipSoroban bool) *OperationProcessor {
return &OperationProcessor{
- batch: batch,
- network: network,
+ batch: batch,
+ network: network,
+ skipSoroban: skipSoroban,
}
}
@@ -37,11 +39,12 @@ func NewOperationProcessor(batch history.OperationBatchInsertBuilder, network st
func (p *OperationProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error {
for i, op := range transaction.Envelope.Operations() {
operation := transactionOperationWrapper{
- index: uint32(i),
- transaction: transaction,
- operation: op,
- ledgerSequence: lcm.LedgerSequence(),
- network: p.network,
+ index: uint32(i),
+ transaction: transaction,
+ operation: op,
+ ledgerSequence: lcm.LedgerSequence(),
+ network: p.network,
+ skipSorobanDetails: p.skipSoroban,
}
details, err := operation.Details()
if err != nil {
@@ -82,11 +85,12 @@ func (p *OperationProcessor) Flush(ctx context.Context, session db.SessionInterf
// transactionOperationWrapper represents the data for a single operation within a transaction
type transactionOperationWrapper struct {
- index uint32
- transaction ingest.LedgerTransaction
- operation xdr.Operation
- ledgerSequence uint32
- network string
+ index uint32
+ transaction ingest.LedgerTransaction
+ operation xdr.Operation
+ ledgerSequence uint32
+ network string
+ skipSorobanDetails bool
}
// ID returns the ID for the operation.
@@ -266,6 +270,11 @@ func (operation *transactionOperationWrapper) IsPayment() bool {
case xdr.OperationTypeAccountMerge:
return true
case xdr.OperationTypeInvokeHostFunction:
+ // #5175: we may want to skip this parsing of payments from contracts as
+ // part of eliding soroban ingestion when DISABLE_SOROBAN_INGEST is set.
+ // However, that could introduce inconsistencies that aren't worth the
+ // gain: payment detection would no longer be thorough, since a payment
+ // can occur within a contract invocation.
diagnosticEvents, err := operation.transaction.GetDiagnosticEvents()
if err != nil {
return false
@@ -689,11 +698,18 @@ func (operation *transactionOperationWrapper) Details() (map[string]interface{},
}
details["parameters"] = params
- if balanceChanges, err := operation.parseAssetBalanceChangesFromContractEvents(); err != nil {
- return nil, err
+ var balanceChanges []map[string]interface{}
+ var parseErr error
+ if operation.skipSorobanDetails {
+ // https://github.com/stellar/go/issues/5175
+ // intentionally skip parsing soroban meta into "asset_balance_changes"
+ balanceChanges = make([]map[string]interface{}, 0)
} else {
- details["asset_balance_changes"] = balanceChanges
+ if balanceChanges, parseErr = operation.parseAssetBalanceChangesFromContractEvents(); parseErr != nil {
+ return nil, parseErr
+ }
}
+ details["asset_balance_changes"] = balanceChanges
case xdr.HostFunctionTypeHostFunctionTypeCreateContract:
args := op.HostFunction.MustCreateContract()
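Design note on the block above: when skipSorobanDetails is set, "asset_balance_changes" is still written as an empty slice rather than omitted, keeping the JSON shape of invoke_host_function operations stable for consumers; the new test below asserts exactly this. A brief sketch of what a caller observes, assuming a wrapper with skipSorobanDetails enabled (fmt import assumed):

	details, err := wrapper.Details()
	if err == nil {
		changes := details["asset_balance_changes"].([]map[string]interface{})
		fmt.Println(len(changes)) // prints 0: key present, list empty
	}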
diff --git a/services/horizon/internal/ingest/processors/operations_processor_test.go b/services/horizon/internal/ingest/processors/operations_processor_test.go
index 4b5fb376cd..275a6056e4 100644
--- a/services/horizon/internal/ingest/processors/operations_processor_test.go
+++ b/services/horizon/internal/ingest/processors/operations_processor_test.go
@@ -42,6 +42,7 @@ func (s *OperationsProcessorTestSuiteLedger) SetupTest() {
s.processor = NewOperationProcessor(
s.mockBatchInsertBuilder,
"test network",
+ false,
)
}
@@ -375,6 +376,65 @@ func (s *OperationsProcessorTestSuiteLedger) TestOperationTypeInvokeHostFunction
}
s.Assert().Equal(found, 4, "should have one balance changed record for each of mint, burn, clawback, transfer")
})
+
+ s.T().Run("InvokeContractAssetBalancesElidedFromDetails", func(t *testing.T) {
+ randomIssuer := keypair.MustRandom()
+ randomAsset := xdr.MustNewCreditAsset("TESTING", randomIssuer.Address())
+ passphrase := "passphrase"
+ randomAccount := keypair.MustRandom().Address()
+ contractId := [32]byte{}
+ zeroContractStrKey, err := strkey.Encode(strkey.VersionByteContract, contractId[:])
+ s.Assert().NoError(err)
+
+ transferContractEvent := contractevents.GenerateEvent(contractevents.EventTypeTransfer, randomAccount, zeroContractStrKey, "", randomAsset, big.NewInt(10000000), passphrase)
+ burnContractEvent := contractevents.GenerateEvent(contractevents.EventTypeBurn, zeroContractStrKey, "", "", randomAsset, big.NewInt(10000000), passphrase)
+ mintContractEvent := contractevents.GenerateEvent(contractevents.EventTypeMint, "", zeroContractStrKey, randomAccount, randomAsset, big.NewInt(10000000), passphrase)
+ clawbackContractEvent := contractevents.GenerateEvent(contractevents.EventTypeClawback, zeroContractStrKey, "", randomAccount, randomAsset, big.NewInt(10000000), passphrase)
+
+ tx = ingest.LedgerTransaction{
+ UnsafeMeta: xdr.TransactionMeta{
+ V: 3,
+ V3: &xdr.TransactionMetaV3{
+ SorobanMeta: &xdr.SorobanTransactionMeta{
+ Events: []xdr.ContractEvent{
+ transferContractEvent,
+ burnContractEvent,
+ mintContractEvent,
+ clawbackContractEvent,
+ },
+ },
+ },
+ },
+ }
+ wrapper := transactionOperationWrapper{
+ skipSorobanDetails: true,
+ transaction: tx,
+ operation: xdr.Operation{
+ SourceAccount: &source,
+ Body: xdr.OperationBody{
+ Type: xdr.OperationTypeInvokeHostFunction,
+ InvokeHostFunctionOp: &xdr.InvokeHostFunctionOp{
+ HostFunction: xdr.HostFunction{
+ Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract,
+ InvokeContract: &xdr.InvokeContractArgs{
+ ContractAddress: xdr.ScAddress{
+ Type: xdr.ScAddressTypeScAddressTypeContract,
+ ContractId: &xdr.Hash{0x1, 0x2},
+ },
+ FunctionName: "foo",
+ Args: xdr.ScVec{},
+ },
+ },
+ },
+ },
+ },
+ network: passphrase,
+ }
+
+ details, err := wrapper.Details()
+ s.Assert().NoError(err)
+ s.Assert().Len(details["asset_balance_changes"], 0, "for invokehostfn op, no asset balances should be in details when skip soroban is enabled")
+ })
}
func (s *OperationsProcessorTestSuiteLedger) assertInvokeHostFunctionParameter(parameters []map[string]string, paramPosition int, expectedType string, expectedVal xdr.ScVal) {
diff --git a/services/horizon/internal/ingest/processors/transactions_processor.go b/services/horizon/internal/ingest/processors/transactions_processor.go
index 871c72624a..b82934d86a 100644
--- a/services/horizon/internal/ingest/processors/transactions_processor.go
+++ b/services/horizon/internal/ingest/processors/transactions_processor.go
@@ -11,23 +11,40 @@ import (
)
type TransactionProcessor struct {
- batch history.TransactionBatchInsertBuilder
+ batch history.TransactionBatchInsertBuilder
+ skipSoroban bool
}
-func NewTransactionFilteredTmpProcessor(batch history.TransactionBatchInsertBuilder) *TransactionProcessor {
+func NewTransactionFilteredTmpProcessor(batch history.TransactionBatchInsertBuilder, skipSoroban bool) *TransactionProcessor {
return &TransactionProcessor{
- batch: batch,
+ batch: batch,
+ skipSoroban: skipSoroban,
}
}
-func NewTransactionProcessor(batch history.TransactionBatchInsertBuilder) *TransactionProcessor {
+func NewTransactionProcessor(batch history.TransactionBatchInsertBuilder, skipSoroban bool) *TransactionProcessor {
return &TransactionProcessor{
- batch: batch,
+ batch: batch,
+ skipSoroban: skipSoroban,
}
}
func (p *TransactionProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error {
- if err := p.batch.Add(transaction, lcm.LedgerSequence()); err != nil {
+ elidedTransaction := transaction
+
+ if p.skipSoroban &&
+ elidedTransaction.UnsafeMeta.V == 3 &&
+ elidedTransaction.UnsafeMeta.MustV3().SorobanMeta != nil {
+ elidedTransaction.UnsafeMeta.V3 = &xdr.TransactionMetaV3{
+ Ext: xdr.ExtensionPoint{},
+ TxChangesBefore: xdr.LedgerEntryChanges{},
+ Operations: []xdr.OperationMeta{},
+ TxChangesAfter: xdr.LedgerEntryChanges{},
+ SorobanMeta: nil,
+ }
+ }
+
+ if err := p.batch.Add(elidedTransaction, lcm.LedgerSequence()); err != nil {
return errors.Wrap(err, "Error batch inserting transaction rows")
}
diff --git a/services/horizon/internal/ingest/processors/transactions_processor_test.go b/services/horizon/internal/ingest/processors/transactions_processor_test.go
index 987e8ce6f9..873a72af05 100644
--- a/services/horizon/internal/ingest/processors/transactions_processor_test.go
+++ b/services/horizon/internal/ingest/processors/transactions_processor_test.go
@@ -29,7 +29,7 @@ func TestTransactionsProcessorTestSuiteLedger(t *testing.T) {
func (s *TransactionsProcessorTestSuiteLedger) SetupTest() {
s.ctx = context.Background()
s.mockBatchInsertBuilder = &history.MockTransactionsBatchInsertBuilder{}
- s.processor = NewTransactionProcessor(s.mockBatchInsertBuilder)
+ s.processor = NewTransactionProcessor(s.mockBatchInsertBuilder, false)
}
func (s *TransactionsProcessorTestSuiteLedger) TearDownTest() {
diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go
index 82a7869d4b..f1f8b2ce2a 100644
--- a/services/horizon/internal/ingest/resume_state_test.go
+++ b/services/horizon/internal/ingest/resume_state_test.go
@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
+ "github.com/stellar/go/historyarchive"
"github.com/stellar/go/ingest/ledgerbackend"
"github.com/stellar/go/support/errors"
"github.com/stellar/go/xdr"
@@ -260,6 +261,13 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() {
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once()
s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once()
s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil)
+ mockStats := &historyarchive.MockArchiveStats{}
+ mockStats.On("GetBackendName").Return("name")
+ mockStats.On("GetDownloads").Return(uint32(0))
+ mockStats.On("GetRequests").Return(uint32(0))
+ mockStats.On("GetUploads").Return(uint32(0))
+ mockStats.On("GetCacheHits").Return(uint32(0))
+ s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).
Run(func(args mock.Arguments) {
@@ -273,14 +281,6 @@ func (s *ResumeTestTestSuite) mockSuccessfulIngestion() {
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once()
s.historyQ.On("Commit").Return(nil).Once()
s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once()
-
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(101),
- ).Return(nil).Once()
-
s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once()
}
func (s *ResumeTestTestSuite) TestBumpIngestLedger() {
@@ -303,13 +303,6 @@ func (s *ResumeTestTestSuite) TestBumpIngestLedger() {
s.historyQ.On("Begin", s.ctx).Return(nil).Once()
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(101), nil).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(101),
- ).Return(errors.New("my error")).Once()
-
next, err := resumeState{latestSuccessfullyProcessedLedger: 99}.run(s.system)
s.Assert().NoError(err)
s.Assert().Equal(
@@ -335,45 +328,6 @@ func (s *ResumeTestTestSuite) TestIngestAllMasterNode() {
)
}
-func (s *ResumeTestTestSuite) TestErrorSettingCursorIgnored() {
- s.historyQ.On("Begin", s.ctx).Return(nil).Once()
- s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once()
- s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once()
- s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil)
-
- s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).
- Run(func(args mock.Arguments) {
- meta := args.Get(0).(xdr.LedgerCloseMeta)
- s.Assert().Equal(uint32(101), meta.LedgerSequence())
- }).
- Return(
- ledgerStats{},
- nil,
- ).Once()
- s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once()
- s.historyQ.On("Commit").Return(nil).Once()
-
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(101),
- ).Return(errors.New("my error")).Once()
-
- s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once()
- s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once()
-
- next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system)
- s.Assert().NoError(err)
- s.Assert().Equal(
- transition{
- node: resumeState{latestSuccessfullyProcessedLedger: 101},
- sleepDuration: 0,
- },
- next,
- )
-}
-
func (s *ResumeTestTestSuite) TestRebuildTradeAggregationBucketsError() {
s.historyQ.On("Begin", s.ctx).Return(nil).Once()
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once()
@@ -422,15 +376,15 @@ func (s *ResumeTestTestSuite) TestReapingObjectsDisabled() {
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once()
s.historyQ.On("Commit").Return(nil).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(101),
- ).Return(nil).Once()
-
s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once()
s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once()
+ mockStats := &historyarchive.MockArchiveStats{}
+ mockStats.On("GetBackendName").Return("name")
+ mockStats.On("GetDownloads").Return(uint32(0))
+ mockStats.On("GetRequests").Return(uint32(0))
+ mockStats.On("GetUploads").Return(uint32(0))
+ mockStats.On("GetCacheHits").Return(uint32(0))
+ s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
// Reap lookup tables not executed
next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system)
@@ -466,13 +420,6 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() {
s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once()
s.historyQ.On("Commit").Return(nil).Once()
- s.stellarCoreClient.On(
- "SetCursor",
- mock.AnythingOfType("*context.timerCtx"),
- defaultCoreCursorName,
- int32(101),
- ).Return(nil).Once()
-
s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once()
s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101), 0).Return(nil).Once()
// Reap lookup tables:
@@ -481,6 +428,13 @@ func (s *ResumeTestTestSuite) TestErrorReapingObjectsIgnored() {
s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once()
s.historyQ.On("ReapLookupTables", mock.AnythingOfType("*context.timerCtx"), mock.Anything).Return(nil, nil, errors.New("error reaping objects")).Once()
s.historyQ.On("Rollback").Return(nil).Once()
+ mockStats := &historyarchive.MockArchiveStats{}
+ mockStats.On("GetBackendName").Return("name")
+ mockStats.On("GetDownloads").Return(uint32(0))
+ mockStats.On("GetRequests").Return(uint32(0))
+ mockStats.On("GetUploads").Return(uint32(0))
+ mockStats.On("GetCacheHits").Return(uint32(0))
+ s.historyAdapter.On("GetStats").Return([]historyarchive.ArchiveStats{mockStats}).Once()
next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system)
s.Assert().NoError(err)
diff --git a/services/horizon/internal/ingest/verify.go b/services/horizon/internal/ingest/verify.go
index bf1ddbe5b5..41b0eb98c5 100644
--- a/services/horizon/internal/ingest/verify.go
+++ b/services/horizon/internal/ingest/verify.go
@@ -157,8 +157,8 @@ func (s *system) verifyState(verifyAgainstLatestCheckpoint bool) error {
}
}
}
- log.WithField("duration", duration).Info("State verification finished")
+ localLog.WithField("duration", duration).Info("State verification finished")
}()
localLog.Info("Creating state reader...")
diff --git a/services/horizon/internal/ingest/verify_test.go b/services/horizon/internal/ingest/verify_test.go
index 901f21a0ca..e3c0e4ec56 100644
--- a/services/horizon/internal/ingest/verify_test.go
+++ b/services/horizon/internal/ingest/verify_test.go
@@ -292,7 +292,7 @@ func TestStateVerifierLockBusy(t *testing.T) {
tt.Assert.NoError(q.BeginTx(tt.Ctx, &sql.TxOptions{}))
checkpointLedger := uint32(63)
- changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "")
+ changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "", false)
gen := randxdr.NewGenerator()
var changes []xdr.LedgerEntryChange
@@ -350,7 +350,7 @@ func TestStateVerifier(t *testing.T) {
ledger := rand.Int31()
checkpointLedger := uint32(ledger - (ledger % 64) - 1)
- changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "")
+ changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger, "", false)
mockChangeReader := &ingest.MockChangeReader{}
gen := randxdr.NewGenerator()
diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go
index 5d38c86ccf..4078c7ad00 100644
--- a/services/horizon/internal/init.go
+++ b/services/horizon/internal/init.go
@@ -91,9 +91,7 @@ func mustInitHorizonDB(app *App) {
func initIngester(app *App) {
var err error
- var coreSession db.SessionInterface
app.ingester, err = ingest.NewSystem(ingest.Config{
- CoreSession: coreSession,
HistorySession: mustNewDBSession(
db.IngestSubservice, app.config.DatabaseURL, ingest.MaxDBConnections, ingest.MaxDBConnections, app.prometheusRegistry,
),
@@ -101,12 +99,10 @@ func initIngester(app *App) {
HistoryArchiveURLs: app.config.HistoryArchiveURLs,
CheckpointFrequency: app.config.CheckpointFrequency,
StellarCoreURL: app.config.StellarCoreURL,
- StellarCoreCursor: app.config.CursorName,
CaptiveCoreBinaryPath: app.config.CaptiveCoreBinaryPath,
CaptiveCoreStoragePath: app.config.CaptiveCoreStoragePath,
CaptiveCoreConfigUseDB: app.config.CaptiveCoreConfigUseDB,
CaptiveCoreToml: app.config.CaptiveCoreToml,
- RemoteCaptiveCoreURL: app.config.RemoteCaptiveCoreURL,
DisableStateVerification: app.config.IngestDisableStateVerification,
StateVerificationCheckpointFrequency: uint32(app.config.IngestStateVerificationCheckpointFrequency),
StateVerificationTimeout: app.config.IngestStateVerificationTimeout,
@@ -114,6 +110,7 @@ func initIngester(app *App) {
EnableExtendedLogLedgerStats: app.config.IngestEnableExtendedLogLedgerStats,
RoundingSlippageFilter: app.config.RoundingSlippageFilter,
EnableIngestionFiltering: app.config.EnableIngestionFiltering,
+ SkipSorobanIngestion: app.config.SkipSorobanIngestion,
})
if err != nil {
diff --git a/services/horizon/internal/integration/invokehostfunction_test.go b/services/horizon/internal/integration/invokehostfunction_test.go
index 275f0de23b..1b1edc091a 100644
--- a/services/horizon/internal/integration/invokehostfunction_test.go
+++ b/services/horizon/internal/integration/invokehostfunction_test.go
@@ -3,11 +3,13 @@ package integration
import (
"crypto/sha256"
"encoding/hex"
+ "fmt"
"os"
"path/filepath"
"testing"
"github.com/stellar/go/clients/horizonclient"
+ "github.com/stellar/go/protocols/horizon"
"github.com/stellar/go/protocols/horizon/operations"
"github.com/stellar/go/services/horizon/internal/test/integration"
"github.com/stellar/go/txnbuild"
@@ -24,13 +26,42 @@ const increment_contract = "soroban_increment_contract.wasm"
// Refer to ./services/horizon/internal/integration/contracts/README.md on how to recompile
// contract code if needed to new wasm.
-func TestContractInvokeHostFunctionInstallContract(t *testing.T) {
+func TestInvokeHostFns(t *testing.T) {
+ // first test contracts when soroban processing is enabled
+ DisabledSoroban = false
+ runAllTests(t)
+ // now test the same contracts with soroban processing disabled
+ DisabledSoroban = true
+ runAllTests(t)
+}
+
+func runAllTests(t *testing.T) {
+ tests := []struct {
+ name string
+ fn func(*testing.T)
+ }{
+ {"CaseContractInvokeHostFunctionInstallContract", CaseContractInvokeHostFunctionInstallContract},
+ {"CaseContractInvokeHostFunctionCreateContractByAddress", CaseContractInvokeHostFunctionCreateContractByAddress},
+ {"CaseContractInvokeHostFunctionInvokeStatelessContractFn", CaseContractInvokeHostFunctionInvokeStatelessContractFn},
+ {"CaseContractInvokeHostFunctionInvokeStatefulContractFn", CaseContractInvokeHostFunctionInvokeStatefulContractFn},
+ }
+
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("Soroban Processing Disabled = %v. ", DisabledSoroban)+tt.name, func(t *testing.T) {
+ tt.fn(t)
+ })
+ }
+}
+
+func CaseContractInvokeHostFunctionInstallContract(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -46,6 +77,7 @@ func TestContractInvokeHostFunctionInstallContract(t *testing.T) {
clientTx, err := itest.Client().TransactionDetail(tx.Hash)
require.NoError(t, err)
+ verifySorobanMeta(t, clientTx)
assert.Equal(t, tx.Hash, clientTx.Hash)
var txResult xdr.TransactionResult
@@ -71,16 +103,17 @@ func TestContractInvokeHostFunctionInstallContract(t *testing.T) {
invokeHostFunctionOpJson, ok := clientInvokeOp.Embedded.Records[0].(operations.InvokeHostFunction)
assert.True(t, ok)
assert.Equal(t, invokeHostFunctionOpJson.Function, "HostFunctionTypeHostFunctionTypeUploadContractWasm")
-
}
-func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) {
+func CaseContractInvokeHostFunctionCreateContractByAddress(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -103,6 +136,7 @@ func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) {
clientTx, err := itest.Client().TransactionDetail(tx.Hash)
require.NoError(t, err)
+ verifySorobanMeta(t, clientTx)
assert.Equal(t, tx.Hash, clientTx.Hash)
var txResult xdr.TransactionResult
@@ -128,13 +162,15 @@ func TestContractInvokeHostFunctionCreateContractByAddress(t *testing.T) {
assert.Equal(t, invokeHostFunctionOpJson.Salt, "110986164698320180327942133831752629430491002266485370052238869825166557303060")
}
-func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) {
+func CaseContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -196,6 +232,7 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) {
clientTx, err := itest.Client().TransactionDetail(tx.Hash)
require.NoError(t, err)
+ verifySorobanMeta(t, clientTx)
assert.Equal(t, tx.Hash, clientTx.Hash)
var txResult xdr.TransactionResult
@@ -209,12 +246,14 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) {
assert.True(t, ok)
assert.Equal(t, invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess)
- // check the function response, should have summed the two input numbers
- invokeResult := xdr.Uint64(9)
- expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU64, U64: &invokeResult}
- var transactionMeta xdr.TransactionMeta
- assert.NoError(t, xdr.SafeUnmarshalBase64(tx.ResultMetaXdr, &transactionMeta))
- assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue))
+ if !DisabledSoroban {
+ // check the function response, should have summed the two input numbers
+ invokeResult := xdr.Uint64(9)
+ expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU64, U64: &invokeResult}
+ var transactionMeta xdr.TransactionMeta
+ assert.NoError(t, xdr.SafeUnmarshalBase64(tx.ResultMetaXdr, &transactionMeta))
+ assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue))
+ }
clientInvokeOp, err := itest.Client().Operations(horizonclient.OperationRequest{
ForTransaction: tx.Hash,
@@ -237,13 +276,15 @@ func TestContractInvokeHostFunctionInvokeStatelessContractFn(t *testing.T) {
assert.Equal(t, invokeHostFunctionOpJson.Parameters[3].Type, "U64")
}
-func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) {
+func CaseContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -292,6 +333,7 @@ func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) {
clientTx, err := itest.Client().TransactionDetail(tx.Hash)
require.NoError(t, err)
+ verifySorobanMeta(t, clientTx)
assert.Equal(t, tx.Hash, clientTx.Hash)
var txResult xdr.TransactionResult
@@ -305,12 +347,14 @@ func TestContractInvokeHostFunctionInvokeStatefulContractFn(t *testing.T) {
assert.True(t, ok)
assert.Equal(t, invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess)
- // check the function response, should have incremented state from 0 to 1
- invokeResult := xdr.Uint32(1)
- expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU32, U32: &invokeResult}
- var transactionMeta xdr.TransactionMeta
- assert.NoError(t, xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &transactionMeta))
- assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue))
+ if !DisabledSoroban {
+ // check the function response, should have incremented state from 0 to 1
+ invokeResult := xdr.Uint32(1)
+ expectedScVal := xdr.ScVal{Type: xdr.ScValTypeScvU32, U32: &invokeResult}
+ var transactionMeta xdr.TransactionMeta
+ assert.NoError(t, xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &transactionMeta))
+ assert.True(t, expectedScVal.Equals(transactionMeta.V3.SorobanMeta.ReturnValue))
+ }
clientInvokeOp, err := itest.Client().Operations(horizonclient.OperationRequest{
ForTransaction: tx.Hash,
@@ -384,3 +428,20 @@ func assembleCreateContractOp(t *testing.T, sourceAccount string, wasmFileName s
SourceAccount: sourceAccount,
}
}
+
+func verifySorobanMeta(t *testing.T, clientTx horizon.Transaction) {
+ var txMeta xdr.TransactionMeta
+ err := xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMeta)
+ require.NoError(t, err)
+ require.NotNil(t, txMeta.V3)
+
+ if !DisabledSoroban {
+ require.NotNil(t, txMeta.V3.SorobanMeta)
+ return
+ }
+
+ require.Empty(t, txMeta.V3.Operations)
+ require.Empty(t, txMeta.V3.TxChangesAfter)
+ require.Empty(t, txMeta.V3.TxChangesBefore)
+ require.Nil(t, txMeta.V3.SorobanMeta)
+}
diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go
index 97fab268bc..ebe3c3bfda 100644
--- a/services/horizon/internal/integration/parameters_test.go
+++ b/services/horizon/internal/integration/parameters_test.go
@@ -541,84 +541,6 @@ func TestDeprecatedOutputs(t *testing.T) {
"Configuring section in the developer documentation on how to use them - "+
"https://developers.stellar.org/docs/run-api-server/configuring")
})
- t.Run("deprecated output for --stellar-core-db-url and --enable-captive-core-ingestion", func(t *testing.T) {
- originalStderr := os.Stderr
- r, w, _ := os.Pipe()
- os.Stderr = w
- stdLog.SetOutput(os.Stderr)
-
- testConfig := integration.GetTestConfig()
- testConfig.HorizonIngestParameters = map[string]string{
- "stellar-core-db-url": "temp-url",
- "enable-captive-core-ingestion": "true",
- }
- test := integration.NewTest(t, *testConfig)
- err := test.StartHorizon()
- assert.NoError(t, err)
- test.WaitForHorizon()
-
- // Use a wait group to wait for the goroutine to finish before proceeding
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := w.Close(); err != nil {
- t.Errorf("Failed to close Stdout")
- return
- }
- }()
-
- outputBytes, _ := io.ReadAll(r)
- wg.Wait() // Wait for the goroutine to finish before proceeding
- _ = r.Close()
- os.Stderr = originalStderr
-
- assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. "+
- "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+
- "the future.")
- assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --enable-captive-core-ingestion has been deprecated. "+
- "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+
- "the future.")
- })
- t.Run("deprecated output for env vars STELLAR_CORE_DATABASE_URL and ENABLE_CAPTIVE_CORE_INGESTION", func(t *testing.T) {
- originalStderr := os.Stderr
- r, w, _ := os.Pipe()
- os.Stderr = w
- stdLog.SetOutput(os.Stderr)
-
- testConfig := integration.GetTestConfig()
- testConfig.HorizonEnvironment = map[string]string{
- "STELLAR_CORE_DATABASE_URL": "temp-url",
- "ENABLE_CAPTIVE_CORE_INGESTION": "true",
- }
- test := integration.NewTest(t, *testConfig)
- err := test.StartHorizon()
- assert.NoError(t, err)
- test.WaitForHorizon()
-
- // Use a wait group to wait for the goroutine to finish before proceeding
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := w.Close(); err != nil {
- t.Errorf("Failed to close Stdout")
- return
- }
- }()
-
- outputBytes, _ := io.ReadAll(r)
- wg.Wait() // Wait for the goroutine to finish before proceeding
- _ = r.Close()
- os.Stderr = originalStderr
-
- assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --stellar-core-db-url has been deprecated. "+
- "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+
- "the future.")
- assert.Contains(t, string(outputBytes), "DEPRECATED - The usage of the flag --enable-captive-core-ingestion has been deprecated. "+
- "Horizon now uses Captive-Core ingestion by default and this flag will soon be removed in "+
- "the future.")
- })
}
func TestGlobalFlagsOutput(t *testing.T) {
diff --git a/services/horizon/internal/integration/sac_test.go b/services/horizon/internal/integration/sac_test.go
index 64c772b44c..c790b5a54c 100644
--- a/services/horizon/internal/integration/sac_test.go
+++ b/services/horizon/internal/integration/sac_test.go
@@ -2,6 +2,7 @@ package integration
import (
"context"
+ "fmt"
"math"
"math/big"
"strings"
@@ -30,19 +31,127 @@ const sac_contract = "soroban_sac_test.wasm"
// of the integration tests.
const LongTermTTL = 10000
+var (
+ DisabledSoroban bool
+)
+
+func TestSAC(t *testing.T) {
+ // first test contracts when soroban processing is enabled
+ DisabledSoroban = false
+ runAllSACTests(t)
+ // now test same contracts when soroban processing is disabled
+ DisabledSoroban = true
+ runAllSACTests(t)
+}
+
+func runAllSACTests(t *testing.T) {
+ tests := []struct {
+ name string
+ fn func(*testing.T)
+ }{
+ {"CaseContractMintToAccount", CaseContractMintToAccount},
+ {"CaseContractMintToContract", CaseContractMintToContract},
+ {"CaseExpirationAndRestoration", CaseExpirationAndRestoration},
+ {"CaseContractTransferBetweenAccounts", CaseContractTransferBetweenAccounts},
+ {"CaseContractTransferBetweenAccountAndContract", CaseContractTransferBetweenAccountAndContract},
+ {"CaseContractTransferBetweenContracts", CaseContractTransferBetweenContracts},
+ {"CaseContractBurnFromAccount", CaseContractBurnFromAccount},
+ {"CaseContractBurnFromContract", CaseContractBurnFromContract},
+ {"CaseContractClawbackFromAccount", CaseContractClawbackFromAccount},
+ {"CaseContractClawbackFromContract", CaseContractClawbackFromContract},
+ }
+
+ for _, tt := range tests {
+		t.Run(fmt.Sprintf("Soroban Processing Disabled = %v: %s", DisabledSoroban, tt.name), func(t *testing.T) {
+ tt.fn(t)
+ })
+ }
+}
+
// Tests use precompiled wasm binaries that are added to the testdata directory.
// Refer to ./services/horizon/internal/integration/contracts/README.md for how to
// recompile the contract code into new wasm binaries if needed.
-func TestContractMintToAccount(t *testing.T) {
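+// createSAC deploys the Stellar Asset Contract instance for the given asset
+// and extends the TTL of the created ledger entries by LongTermTTL ledgers so
+// they do not expire in the middle of a test.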
+func createSAC(itest *integration.Test, asset xdr.Asset) {
+ invokeHostFunction := &txnbuild.InvokeHostFunction{
+ HostFunction: xdr.HostFunction{
+ Type: xdr.HostFunctionTypeHostFunctionTypeCreateContract,
+ CreateContract: &xdr.CreateContractArgs{
+ ContractIdPreimage: xdr.ContractIdPreimage{
+ Type: xdr.ContractIdPreimageTypeContractIdPreimageFromAsset,
+ FromAsset: &asset,
+ },
+ Executable: xdr.ContractExecutable{
+ Type: xdr.ContractExecutableTypeContractExecutableStellarAsset,
+ WasmHash: nil,
+ },
+ },
+ },
+ SourceAccount: itest.Master().Address(),
+ }
+ _, _, preFlightOp := assertInvokeHostFnSucceeds(itest, itest.Master(), invokeHostFunction)
+ sourceAccount, extendTTLOp, minFee := itest.PreflightExtendExpiration(
+ itest.Master().Address(),
+ preFlightOp.Ext.SorobanData.Resources.Footprint.ReadWrite,
+ LongTermTTL,
+ )
+ itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp)
+}
+
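+// invokeStoreSet builds an InvokeHostFunction operation that calls "set" on
+// the store contract with the key and value from the given contract data entry.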
+func invokeStoreSet(
+ itest *integration.Test,
+ storeContractID xdr.Hash,
+ ledgerEntryData xdr.LedgerEntryData,
+) *txnbuild.InvokeHostFunction {
+ key := ledgerEntryData.MustContractData().Key
+ val := ledgerEntryData.MustContractData().Val
+ return &txnbuild.InvokeHostFunction{
+ HostFunction: xdr.HostFunction{
+ Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract,
+ InvokeContract: &xdr.InvokeContractArgs{
+ ContractAddress: contractIDParam(storeContractID),
+ FunctionName: "set",
+ Args: xdr.ScVec{
+ key,
+ val,
+ },
+ },
+ },
+ SourceAccount: itest.Master().Address(),
+ }
+}
+
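+// invokeStoreRemove builds an InvokeHostFunction operation that calls "remove"
+// on the store contract for the given contract data key.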
+func invokeStoreRemove(
+ itest *integration.Test,
+ storeContractID xdr.Hash,
+ ledgerKey xdr.LedgerKey,
+) *txnbuild.InvokeHostFunction {
+ return &txnbuild.InvokeHostFunction{
+ HostFunction: xdr.HostFunction{
+ Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract,
+ InvokeContract: &xdr.InvokeContractArgs{
+ ContractAddress: contractIDParam(storeContractID),
+ FunctionName: "remove",
+ Args: xdr.ScVec{
+ ledgerKey.MustContractData().Key,
+ },
+ },
+ },
+ SourceAccount: itest.Master().Address(),
+ }
+}
+
+func CaseContractMintToAccount(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
- HorizonEnvironment: map[string]string{"INGEST_DISABLE_STATE_VERIFICATION": "true", "CONNECTION_TIMEOUT": "360000"},
- EnableSorobanRPC: true,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban),
+ },
+ EnableSorobanRPC: true,
})
issuer := itest.Master().Address()
@@ -72,17 +181,22 @@ func TestContractMintToAccount(t *testing.T) {
balanceContracts: big.NewInt(0),
contractID: stellarAssetContractID(itest, asset),
})
-
- fx := getTxEffects(itest, mintTx, asset)
- require.Len(t, fx, 1)
- creditEffect := assertContainsEffect(t, fx,
- effects.EffectAccountCredited)[0].(effects.AccountCredited)
- assert.Equal(t, recipientKp.Address(), creditEffect.Account)
- assert.Equal(t, issuer, creditEffect.Asset.Issuer)
- assert.Equal(t, code, creditEffect.Asset.Code)
- assert.Equal(t, "20.0000000", creditEffect.Amount)
assertEventPayments(itest, mintTx, asset, "", recipient.GetAccountID(), "mint", "20.0000000")
+ if !DisabledSoroban {
+ fx := getTxEffects(itest, mintTx, asset)
+ require.Len(t, fx, 1)
+ creditEffect := assertContainsEffect(t, fx,
+ effects.EffectAccountCredited)[0].(effects.AccountCredited)
+ assert.Equal(t, recipientKp.Address(), creditEffect.Account)
+ assert.Equal(t, issuer, creditEffect.Asset.Issuer)
+ assert.Equal(t, code, creditEffect.Asset.Code)
+ assert.Equal(t, "20.0000000", creditEffect.Amount)
+ } else {
+ fx := getTxEffects(itest, mintTx, asset)
+ require.Len(t, fx, 0)
+ }
+
otherRecipientKp, otherRecipient := itest.CreateAccount("100")
itest.MustEstablishTrustline(otherRecipientKp, otherRecipient, txnbuild.MustAssetFromXDR(asset))
@@ -94,12 +208,6 @@ func TestContractMintToAccount(t *testing.T) {
)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("20"))
assertContainsBalance(itest, otherRecipientKp, issuer, code, amount.MustParse("30"))
-
- fx = getTxEffects(itest, transferTx, asset)
- assert.Len(t, fx, 2)
- assertContainsEffect(t, fx,
- effects.EffectAccountCredited,
- effects.EffectAccountDebited)
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -111,41 +219,28 @@ func TestContractMintToAccount(t *testing.T) {
balanceContracts: big.NewInt(0),
contractID: stellarAssetContractID(itest, asset),
})
-}
-func createSAC(itest *integration.Test, asset xdr.Asset) {
- invokeHostFunction := &txnbuild.InvokeHostFunction{
- HostFunction: xdr.HostFunction{
- Type: xdr.HostFunctionTypeHostFunctionTypeCreateContract,
- CreateContract: &xdr.CreateContractArgs{
- ContractIdPreimage: xdr.ContractIdPreimage{
- Type: xdr.ContractIdPreimageTypeContractIdPreimageFromAsset,
- FromAsset: &asset,
- },
- Executable: xdr.ContractExecutable{
- Type: xdr.ContractExecutableTypeContractExecutableStellarAsset,
- WasmHash: nil,
- },
- },
- },
- SourceAccount: itest.Master().Address(),
+ if !DisabledSoroban {
+ fx := getTxEffects(itest, transferTx, asset)
+ assert.Len(t, fx, 2)
+ assertContainsEffect(t, fx,
+ effects.EffectAccountCredited,
+ effects.EffectAccountDebited)
+ } else {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
}
- _, _, preFlightOp := assertInvokeHostFnSucceeds(itest, itest.Master(), invokeHostFunction)
- sourceAccount, extendTTLOp, minFee := itest.PreflightExtendExpiration(
- itest.Master().Address(),
- preFlightOp.Ext.SorobanData.Resources.Footprint.ReadWrite,
- LongTermTTL,
- )
- itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp)
}
-func TestContractMintToContract(t *testing.T) {
+func CaseContractMintToContract(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -170,19 +265,25 @@ func TestContractMintToContract(t *testing.T) {
i128Param(int64(mintAmount.Hi), uint64(mintAmount.Lo)),
contractAddressParam(recipientContractID)),
)
- assertContainsEffect(t, getTxEffects(itest, mintTx, asset),
- effects.EffectContractCredited)
- balanceAmount, _, _ := assertInvokeHostFnSucceeds(
- itest,
- itest.Master(),
- contractBalance(itest, issuer, asset, recipientContractID),
- )
- assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
- assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64-3), (*balanceAmount.I128).Lo)
- assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi)
assertEventPayments(itest, mintTx, asset, "", strkeyRecipientContractID, "mint", amount.String128(mintAmount))
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, mintTx, asset),
+ effects.EffectContractCredited)
+
+ balanceAmount, _, _ := assertInvokeHostFnSucceeds(
+ itest,
+ itest.Master(),
+ contractBalance(itest, issuer, asset, recipientContractID),
+ )
+ assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
+ assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64-3), (*balanceAmount.I128).Lo)
+ assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi)
+ } else {
+ fx := getTxEffects(itest, mintTx, asset)
+ require.Len(t, fx, 0)
+ }
// calling transfer from the issuer account will also mint the asset
_, transferTx, _ := assertInvokeHostFnSucceeds(
itest,
@@ -190,19 +291,6 @@ func TestContractMintToContract(t *testing.T) {
transferWithAmount(itest, issuer, asset, i128Param(0, 3), contractAddressParam(recipientContractID)),
)
- assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
- effects.EffectAccountDebited,
- effects.EffectContractCredited)
-
- balanceAmount, _, _ = assertInvokeHostFnSucceeds(
- itest,
- itest.Master(),
- contractBalance(itest, issuer, asset, recipientContractID),
- )
- assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
- assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64), (*balanceAmount.I128).Lo)
- assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi)
-
// 2^127 - 1
balanceContracts := new(big.Int).Lsh(big.NewInt(1), 127)
balanceContracts.Sub(balanceContracts, big.NewInt(1))
@@ -217,9 +305,27 @@ func TestContractMintToContract(t *testing.T) {
balanceContracts: balanceContracts,
contractID: stellarAssetContractID(itest, asset),
})
+
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
+ effects.EffectAccountDebited,
+ effects.EffectContractCredited)
+
+ balanceAmount, _, _ := assertInvokeHostFnSucceeds(
+ itest,
+ itest.Master(),
+ contractBalance(itest, issuer, asset, recipientContractID),
+ )
+ assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
+ assert.Equal(itest.CurrentTest(), xdr.Uint64(math.MaxUint64), (*balanceAmount.I128).Lo)
+ assert.Equal(itest.CurrentTest(), xdr.Int64(math.MaxInt64), (*balanceAmount.I128).Hi)
+ } else {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
+ }
}
-func TestExpirationAndRestoration(t *testing.T) {
+func CaseExpirationAndRestoration(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
@@ -232,6 +338,7 @@ func TestExpirationAndRestoration(t *testing.T) {
// a fake asset contract in the horizon db and we don't
// want state verification to detect this
"ingest-disable-state-verification": "true",
+ "disable-soroban-ingest": fmt.Sprint(DisabledSoroban),
},
})
@@ -294,6 +401,7 @@ func TestExpirationAndRestoration(t *testing.T) {
LongTermTTL,
)
itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &extendTTLOp)
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -321,6 +429,16 @@ func TestExpirationAndRestoration(t *testing.T) {
balanceToExpire,
),
)
+
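+	// Capture the ledger key of the balance entry that is about to expire so
+	// that the test can restore it later with a RestoreFootprint operation.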
+ balanceToExpireLedgerKey := xdr.LedgerKey{
+ Type: xdr.LedgerEntryTypeContractData,
+ ContractData: &xdr.LedgerKeyContractData{
+ Contract: balanceToExpire.ContractData.Contract,
+ Key: balanceToExpire.ContractData.Key,
+ Durability: balanceToExpire.ContractData.Durability,
+ },
+ }
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -333,14 +451,6 @@ func TestExpirationAndRestoration(t *testing.T) {
contractID: storeContractID,
})
- balanceToExpireLedgerKey := xdr.LedgerKey{
- Type: xdr.LedgerEntryTypeContractData,
- ContractData: &xdr.LedgerKeyContractData{
- Contract: balanceToExpire.ContractData.Contract,
- Key: balanceToExpire.ContractData.Key,
- Durability: balanceToExpire.ContractData.Durability,
- },
- }
// The TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME=10 configuration in stellar-core
// will ensure that the ledger entry expires after 10 ledgers.
// Because ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING is set to true, 10 ledgers
@@ -372,6 +482,7 @@ func TestExpirationAndRestoration(t *testing.T) {
),
),
)
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -390,6 +501,7 @@ func TestExpirationAndRestoration(t *testing.T) {
balanceToExpireLedgerKey,
)
itest.MustSubmitOperationsWithFee(&sourceAccount, itest.Master(), minFee+txnbuild.MinBaseFee, &restoreFootprint)
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -419,6 +531,7 @@ func TestExpirationAndRestoration(t *testing.T) {
),
),
)
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -444,6 +557,7 @@ func TestExpirationAndRestoration(t *testing.T) {
),
),
)
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -457,56 +571,15 @@ func TestExpirationAndRestoration(t *testing.T) {
})
}
-func invokeStoreSet(
- itest *integration.Test,
- storeContractID xdr.Hash,
- ledgerEntryData xdr.LedgerEntryData,
-) *txnbuild.InvokeHostFunction {
- key := ledgerEntryData.MustContractData().Key
- val := ledgerEntryData.MustContractData().Val
- return &txnbuild.InvokeHostFunction{
- HostFunction: xdr.HostFunction{
- Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract,
- InvokeContract: &xdr.InvokeContractArgs{
- ContractAddress: contractIDParam(storeContractID),
- FunctionName: "set",
- Args: xdr.ScVec{
- key,
- val,
- },
- },
- },
- SourceAccount: itest.Master().Address(),
- }
-}
-
-func invokeStoreRemove(
- itest *integration.Test,
- storeContractID xdr.Hash,
- ledgerKey xdr.LedgerKey,
-) *txnbuild.InvokeHostFunction {
- return &txnbuild.InvokeHostFunction{
- HostFunction: xdr.HostFunction{
- Type: xdr.HostFunctionTypeHostFunctionTypeInvokeContract,
- InvokeContract: &xdr.InvokeContractArgs{
- ContractAddress: contractIDParam(storeContractID),
- FunctionName: "remove",
- Args: xdr.ScVec{
- ledgerKey.MustContractData().Key,
- },
- },
- },
- SourceAccount: itest.Master().Address(),
- }
-}
-
-func TestContractTransferBetweenAccounts(t *testing.T) {
+func CaseContractTransferBetweenAccounts(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -534,6 +607,7 @@ func TestContractTransferBetweenAccounts(t *testing.T) {
)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000"))
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -557,10 +631,6 @@ func TestContractTransferBetweenAccounts(t *testing.T) {
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("970"))
assertContainsBalance(itest, otherRecipientKp, issuer, code, amount.MustParse("30"))
-
- fx := getTxEffects(itest, transferTx, asset)
- assert.NotEmpty(t, fx)
- assertContainsEffect(t, fx, effects.EffectAccountCredited, effects.EffectAccountDebited)
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -573,15 +643,26 @@ func TestContractTransferBetweenAccounts(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
assertEventPayments(itest, transferTx, asset, recipientKp.Address(), otherRecipient.GetAccountID(), "transfer", "30.0000000")
+
+ if !DisabledSoroban {
+ fx := getTxEffects(itest, transferTx, asset)
+ assert.NotEmpty(t, fx)
+ assertContainsEffect(t, fx, effects.EffectAccountCredited, effects.EffectAccountDebited)
+ } else {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
+ }
}
-func TestContractTransferBetweenAccountAndContract(t *testing.T) {
+func CaseContractTransferBetweenAccountAndContract(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -627,9 +708,6 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
mint(itest, issuer, asset, "1000", contractAddressParam(recipientContractID)),
)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000"))
- assertContainsEffect(t, getTxEffects(itest, mintTx, asset),
- effects.EffectContractCredited)
-
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -642,6 +720,14 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, mintTx, asset),
+ effects.EffectContractCredited)
+ } else {
+ fx := getTxEffects(itest, mintTx, asset)
+ require.Len(t, fx, 0)
+ }
+
// transfer from account to contract
_, transferTx, _ := assertInvokeHostFnSucceeds(
itest,
@@ -649,8 +735,6 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
transfer(itest, recipientKp.Address(), asset, "30", contractAddressParam(recipientContractID)),
)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("970"))
- assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
- effects.EffectAccountDebited, effects.EffectContractCredited)
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -664,14 +748,19 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
})
assertEventPayments(itest, transferTx, asset, recipientKp.Address(), strkeyRecipientContractID, "transfer", "30.0000000")
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
+ effects.EffectAccountDebited, effects.EffectContractCredited)
+ } else {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
+ }
// transfer from contract to account
_, transferTx, _ = assertInvokeHostFnSucceeds(
itest,
recipientKp,
transferFromContract(itest, recipientKp.Address(), asset, recipientContractID, recipientContractHash, "500", accountAddressParam(recipient.GetAccountID())),
)
- assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
- effects.EffectContractDebited, effects.EffectAccountCredited)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1470"))
assertAssetStats(itest, assetStats{
code: code,
@@ -686,6 +775,13 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
})
assertEventPayments(itest, transferTx, asset, strkeyRecipientContractID, recipientKp.Address(), "transfer", "500.0000000")
+ if DisabledSoroban {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
+ return
+ }
+ assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
+ effects.EffectContractDebited, effects.EffectAccountCredited)
balanceAmount, _, _ := assertInvokeHostFnSucceeds(
itest,
itest.Master(),
@@ -696,13 +792,15 @@ func TestContractTransferBetweenAccountAndContract(t *testing.T) {
assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi)
}
-func TestContractTransferBetweenContracts(t *testing.T) {
+func CaseContractTransferBetweenContracts(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -742,8 +840,28 @@ func TestContractTransferBetweenContracts(t *testing.T) {
itest.Master(),
transferFromContract(itest, issuer, asset, emitterContractID, emitterContractHash, "10", contractAddressParam(recipientContractID)),
)
- assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
- effects.EffectContractCredited, effects.EffectContractDebited)
+
+ assertAssetStats(itest, assetStats{
+ code: code,
+ issuer: issuer,
+ numAccounts: 0,
+ balanceAccounts: 0,
+ balanceArchivedContracts: big.NewInt(0),
+ numArchivedContracts: 0,
+ numContracts: 2,
+ balanceContracts: big.NewInt(int64(amount.MustParse("1000"))),
+ contractID: stellarAssetContractID(itest, asset),
+ })
+ assertEventPayments(itest, transferTx, asset, strkeyEmitterContractID, strkeyRecipientContractID, "transfer", "10.0000000")
+
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, transferTx, asset),
+ effects.EffectContractCredited, effects.EffectContractDebited)
+ } else {
+ fx := getTxEffects(itest, transferTx, asset)
+ require.Len(t, fx, 0)
+ return
+ }
// Check balances of emitter and recipient
emitterBalanceAmount, _, _ := assertInvokeHostFnSucceeds(
@@ -763,28 +881,17 @@ func TestContractTransferBetweenContracts(t *testing.T) {
assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, recipientBalanceAmount.Type)
assert.Equal(itest.CurrentTest(), xdr.Uint64(100000000), (*recipientBalanceAmount.I128).Lo)
assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*recipientBalanceAmount.I128).Hi)
-
- assertAssetStats(itest, assetStats{
- code: code,
- issuer: issuer,
- numAccounts: 0,
- balanceAccounts: 0,
- balanceArchivedContracts: big.NewInt(0),
- numArchivedContracts: 0,
- numContracts: 2,
- balanceContracts: big.NewInt(int64(amount.MustParse("1000"))),
- contractID: stellarAssetContractID(itest, asset),
- })
- assertEventPayments(itest, transferTx, asset, strkeyEmitterContractID, strkeyRecipientContractID, "transfer", "10.0000000")
}
-func TestContractBurnFromAccount(t *testing.T) {
+func CaseContractBurnFromAccount(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -830,16 +937,6 @@ func TestContractBurnFromAccount(t *testing.T) {
burn(itest, recipientKp.Address(), asset, "500"),
)
- fx := getTxEffects(itest, burnTx, asset)
- require.Len(t, fx, 1)
- assetEffects := assertContainsEffect(t, fx, effects.EffectAccountDebited)
- require.GreaterOrEqual(t, len(assetEffects), 1)
- burnEffect := assetEffects[0].(effects.AccountDebited)
-
- assert.Equal(t, issuer, burnEffect.Asset.Issuer)
- assert.Equal(t, code, burnEffect.Asset.Code)
- assert.Equal(t, "500.0000000", burnEffect.Amount)
- assert.Equal(t, recipientKp.Address(), burnEffect.Account)
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -852,15 +949,33 @@ func TestContractBurnFromAccount(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
assertEventPayments(itest, burnTx, asset, recipientKp.Address(), "", "burn", "500.0000000")
+
+ if !DisabledSoroban {
+ fx := getTxEffects(itest, burnTx, asset)
+ require.Len(t, fx, 1)
+ assetEffects := assertContainsEffect(t, fx, effects.EffectAccountDebited)
+ require.GreaterOrEqual(t, len(assetEffects), 1)
+ burnEffect := assetEffects[0].(effects.AccountDebited)
+
+ assert.Equal(t, issuer, burnEffect.Asset.Issuer)
+ assert.Equal(t, code, burnEffect.Asset.Code)
+ assert.Equal(t, "500.0000000", burnEffect.Amount)
+ assert.Equal(t, recipientKp.Address(), burnEffect.Account)
+ } else {
+ fx := getTxEffects(itest, burnTx, asset)
+ require.Len(t, fx, 0)
+ }
}
-func TestContractBurnFromContract(t *testing.T) {
+func CaseContractBurnFromContract(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -895,19 +1010,6 @@ func TestContractBurnFromContract(t *testing.T) {
burnSelf(itest, issuer, asset, recipientContractID, recipientContractHash, "10"),
)
- balanceAmount, _, _ := assertInvokeHostFnSucceeds(
- itest,
- itest.Master(),
- contractBalance(itest, issuer, asset, recipientContractID),
- )
-
- assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
- assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo)
- assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi)
-
- assertContainsEffect(t, getTxEffects(itest, burnTx, asset),
- effects.EffectContractDebited)
-
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -920,15 +1022,35 @@ func TestContractBurnFromContract(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
assertEventPayments(itest, burnTx, asset, strkeyRecipientContractID, "", "burn", "10.0000000")
+
+ if !DisabledSoroban {
+ balanceAmount, _, _ := assertInvokeHostFnSucceeds(
+ itest,
+ itest.Master(),
+ contractBalance(itest, issuer, asset, recipientContractID),
+ )
+
+ assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
+ assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo)
+ assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi)
+
+ assertContainsEffect(t, getTxEffects(itest, burnTx, asset),
+ effects.EffectContractDebited)
+ } else {
+ fx := getTxEffects(itest, burnTx, asset)
+ require.Len(t, fx, 0)
+ }
}
-func TestContractClawbackFromAccount(t *testing.T) {
+func CaseContractClawbackFromAccount(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -966,6 +1088,7 @@ func TestContractClawbackFromAccount(t *testing.T) {
)
assertContainsBalance(itest, recipientKp, issuer, code, amount.MustParse("1000"))
+
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -983,8 +1106,6 @@ func TestContractClawbackFromAccount(t *testing.T) {
itest.Master(),
clawback(itest, issuer, asset, "1000", accountAddressParam(recipientKp.Address())),
)
-
- assertContainsEffect(t, getTxEffects(itest, clawTx, asset), effects.EffectAccountDebited)
assertContainsBalance(itest, recipientKp, issuer, code, 0)
assertAssetStats(itest, assetStats{
code: code,
@@ -998,15 +1119,24 @@ func TestContractClawbackFromAccount(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
assertEventPayments(itest, clawTx, asset, recipientKp.Address(), "", "clawback", "1000.0000000")
+
+ if !DisabledSoroban {
+ assertContainsEffect(t, getTxEffects(itest, clawTx, asset), effects.EffectAccountDebited)
+ } else {
+ fx := getTxEffects(itest, clawTx, asset)
+ require.Len(t, fx, 0)
+ }
}
-func TestContractClawbackFromContract(t *testing.T) {
+func CaseContractClawbackFromContract(t *testing.T) {
if integration.GetCoreMaxSupportedProtocol() < 20 {
t.Skip("This test run does not support less than Protocol 20")
}
itest := integration.NewTest(t, integration.Config{
- ProtocolVersion: 20,
+ ProtocolVersion: 20,
+ HorizonEnvironment: map[string]string{
+ "DISABLE_SOROBAN_INGEST": fmt.Sprint(DisabledSoroban)},
EnableSorobanRPC: true,
})
@@ -1044,19 +1174,6 @@ func TestContractClawbackFromContract(t *testing.T) {
itest.Master(),
clawback(itest, issuer, asset, "10", contractAddressParam(recipientContractID)),
)
-
- balanceAmount, _, _ := assertInvokeHostFnSucceeds(
- itest,
- itest.Master(),
- contractBalance(itest, issuer, asset, recipientContractID),
- )
- assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
- assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo)
- assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi)
-
- assertContainsEffect(t, getTxEffects(itest, clawTx, asset),
- effects.EffectContractDebited)
-
assertAssetStats(itest, assetStats{
code: code,
issuer: issuer,
@@ -1069,6 +1186,23 @@ func TestContractClawbackFromContract(t *testing.T) {
contractID: stellarAssetContractID(itest, asset),
})
assertEventPayments(itest, clawTx, asset, strkeyRecipientContractID, "", "clawback", "10.0000000")
+
+ if !DisabledSoroban {
+ balanceAmount, _, _ := assertInvokeHostFnSucceeds(
+ itest,
+ itest.Master(),
+ contractBalance(itest, issuer, asset, recipientContractID),
+ )
+ assert.Equal(itest.CurrentTest(), xdr.ScValTypeScvI128, balanceAmount.Type)
+ assert.Equal(itest.CurrentTest(), xdr.Uint64(9900000000), (*balanceAmount.I128).Lo)
+ assert.Equal(itest.CurrentTest(), xdr.Int64(0), (*balanceAmount.I128).Hi)
+
+ assertContainsEffect(t, getTxEffects(itest, clawTx, asset),
+ effects.EffectContractDebited)
+ } else {
+ fx := getTxEffects(itest, clawTx, asset)
+ require.Len(t, fx, 0)
+ }
}
func assertContainsBalance(itest *integration.Test, acct *keypair.Full, issuer, code string, amt xdr.Int64) {
@@ -1179,6 +1313,12 @@ func assertEventPayments(itest *integration.Test, txHash string, asset xdr.Asset
invokeHostFn := ops.Embedded.Records[0].(operations.InvokeHostFunction)
assert.Equal(itest.CurrentTest(), invokeHostFn.Function, "HostFunctionTypeHostFunctionTypeInvokeContract")
+
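+	// When Soroban ingestion is disabled, Horizon does not populate asset
+	// balance changes on invoke host function operations.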
+ if DisabledSoroban {
+ require.Equal(itest.CurrentTest(), 0, len(invokeHostFn.AssetBalanceChanges))
+ return
+ }
+
require.Equal(itest.CurrentTest(), 1, len(invokeHostFn.AssetBalanceChanges))
assetBalanceChange := invokeHostFn.AssetBalanceChanges[0]
assert.Equal(itest.CurrentTest(), assetBalanceChange.Amount, amount)
@@ -1400,10 +1540,6 @@ func assertInvokeHostFnSucceeds(itest *integration.Test, signer *keypair.Full, o
err = xdr.SafeUnmarshalBase64(clientTx.ResultXdr, &txResult)
require.NoError(itest.CurrentTest(), err)
- var txMetaResult xdr.TransactionMeta
- err = xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMetaResult)
- require.NoError(itest.CurrentTest(), err)
-
opResults, ok := txResult.OperationResults()
assert.True(itest.CurrentTest(), ok)
assert.Equal(itest.CurrentTest(), len(opResults), 1)
@@ -1411,9 +1547,18 @@ func assertInvokeHostFnSucceeds(itest *integration.Test, signer *keypair.Full, o
assert.True(itest.CurrentTest(), ok)
assert.Equal(itest.CurrentTest(), invokeHostFunctionResult.Code, xdr.InvokeHostFunctionResultCodeInvokeHostFunctionSuccess)
- returnValue := txMetaResult.MustV3().SorobanMeta.ReturnValue
+ var returnValue *xdr.ScVal
+
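+	// The return value is decoded from the Soroban transaction meta, which is
+	// only available when Soroban ingestion is enabled; otherwise returnValue
+	// remains nil.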
+ if !DisabledSoroban {
+ var txMetaResult xdr.TransactionMeta
+ err = xdr.SafeUnmarshalBase64(clientTx.ResultMetaXdr, &txMetaResult)
+ require.NoError(itest.CurrentTest(), err)
+ returnValue = &txMetaResult.MustV3().SorobanMeta.ReturnValue
+ } else {
+ verifySorobanMeta(itest.CurrentTest(), clientTx)
+ }
- return &returnValue, clientTx.Hash, &preFlightOp
+ return returnValue, clientTx.Hash, &preFlightOp
}
func stellarAssetContractID(itest *integration.Test, asset xdr.Asset) xdr.Hash {
diff --git a/services/horizon/internal/test/db/main.go b/services/horizon/internal/test/db/main.go
index 4156ec25fb..6114a677ff 100644
--- a/services/horizon/internal/test/db/main.go
+++ b/services/horizon/internal/test/db/main.go
@@ -29,6 +29,8 @@ func horizonPostgres(t *testing.T) *db.DB {
return horizonDB
}
+// TODO: remove references to the internal core db; the scenario tests that
+// rely on it to seed the core db need to be removed first.
func corePostgres(t *testing.T) *db.DB {
if coreDB != nil {
return coreDB
@@ -60,6 +62,8 @@ func HorizonROURL() string {
return horizonDB.RO_DSN
}
+// TODO: remove references to the core db; the scenario tests that rely on it
+// to seed the core db need to be removed first.
func StellarCore(t *testing.T) *sqlx.DB {
if coreDBConn != nil {
return coreDBConn
@@ -68,6 +72,8 @@ func StellarCore(t *testing.T) *sqlx.DB {
return coreDBConn
}
+// TODO: remove references to the core db; the scenario tests that rely on it
+// to seed the core db need to be removed first.
func StellarCoreURL() string {
if coreDB == nil {
log.Panic(fmt.Errorf("StellarCore not initialized"))
diff --git a/services/horizon/internal/test/main.go b/services/horizon/internal/test/main.go
index fea814b4c3..93ed4a94db 100644
--- a/services/horizon/internal/test/main.go
+++ b/services/horizon/internal/test/main.go
@@ -25,11 +25,12 @@ type StaticMockServer struct {
// T provides a common set of functionality for each test in horizon
type T struct {
- T *testing.T
- Assert *assert.Assertions
- Require *require.Assertions
- Ctx context.Context
- HorizonDB *sqlx.DB
+ T *testing.T
+ Assert *assert.Assertions
+ Require *require.Assertions
+ Ctx context.Context
+ HorizonDB *sqlx.DB
+	// TODO: remove the core db reference once the scenario tests are removed.
CoreDB *sqlx.DB
EndLogTest func() []logrus.Entry
}
diff --git a/services/horizon/internal/test/t.go b/services/horizon/internal/test/t.go
index c2a75da986..2f86f70565 100644
--- a/services/horizon/internal/test/t.go
+++ b/services/horizon/internal/test/t.go
@@ -18,7 +18,7 @@ import (
"github.com/stellar/go/support/render/hal"
)
-// CoreSession returns a db.Session instance pointing at the stellar core test database
+// TODO: remove the core db reference once the scenario tests are removed.
func (t *T) CoreSession() *db.Session {
return &db.Session{
DB: t.CoreDB,
@@ -143,17 +143,7 @@ func (t *T) UnmarshalExtras(r io.Reader) map[string]string {
func (t *T) LoadLedgerStatus() ledger.Status {
var next ledger.Status
- err := t.CoreSession().GetRaw(t.Ctx, &next, `
- SELECT
- COALESCE(MAX(ledgerseq), 0) as core_latest
- FROM ledgerheaders
- `)
-
- if err != nil {
- panic(err)
- }
-
- err = t.HorizonSession().GetRaw(t.Ctx, &next, `
+ err := t.HorizonSession().GetRaw(t.Ctx, &next, `
SELECT
COALESCE(MIN(sequence), 0) as history_elder,
COALESCE(MAX(sequence), 0) as history_latest
diff --git a/support/log/entry.go b/support/log/entry.go
index a4661cb8c0..9b3b596025 100644
--- a/support/log/entry.go
+++ b/support/log/entry.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
gerr "github.com/go-errors/errors"
"github.com/sirupsen/logrus"
@@ -198,7 +197,7 @@ func (e *Entry) StartTest(level logrus.Level) func() []logrus.Entry {
e.entry.Logger.AddHook(hook)
old := e.entry.Logger.Out
- e.entry.Logger.Out = ioutil.Discard
+ e.entry.Logger.Out = io.Discard
oldLevel := e.entry.Logger.GetLevel()
e.entry.Logger.SetLevel(level)