diff --git a/tests/e2e/configurer/chain/chain.go b/tests/e2e/configurer/chain/chain.go index 5fcce8f941f..fb39b752d60 100644 --- a/tests/e2e/configurer/chain/chain.go +++ b/tests/e2e/configurer/chain/chain.go @@ -80,9 +80,28 @@ func (c *Config) RemoveNode(nodeName string) error { return fmt.Errorf("node %s not found", nodeName) } +// WaitUntilEpoch waits for the chain to reach the specified epoch. +func (c *Config) WaitUntilEpoch(epoch int64, epochIdentifier string) { + node, err := c.GetDefaultNode() + require.NoError(c.t, err) + node.WaitUntil(func(_ coretypes.SyncInfo) bool { + newEpochNumber := node.QueryCurrentEpoch(epochIdentifier) + c.t.Logf("current epoch number is (%d), waiting to reach (%d)", newEpochNumber, epoch) + return newEpochNumber >= epoch + }) +} + +// WaitForNumEpochs waits for the chain to go through a given number of epochs. +func (c *Config) WaitForNumEpochs(epochsToWait int64, epochIdentifier string) { + node, err := c.GetDefaultNode() + require.NoError(c.t, err) + oldEpochNumber := node.QueryCurrentEpoch(epochIdentifier) + c.WaitUntilEpoch(oldEpochNumber+epochsToWait, epochIdentifier) +} + // WaitUntilHeight waits for all validators to reach the specified height at the minimum. // returns error, if any. -func (c *Config) WaitUntilHeight(height int64) error { +func (c *Config) WaitUntilHeight(height int64) { // Ensure the nodes are making progress. doneCondition := func(syncInfo coretypes.SyncInfo) bool { curHeight := syncInfo.LatestBlockHeight @@ -97,11 +116,16 @@ func (c *Config) WaitUntilHeight(height int64) error { for _, node := range c.NodeConfigs { c.t.Logf("node container: %s, waiting to reach height %d", node.Name, height) - if err := node.WaitUntil(doneCondition); err != nil { - return err - } + node.WaitUntil(doneCondition) } - return nil +} + +// WaitForNumHeights waits for all nodes to go through a given number of heights. 
+func (c *Config) WaitForNumHeights(heightsToWait int64) { + node, err := c.GetDefaultNode() + require.NoError(c.t, err) + currentHeight := node.QueryCurrentHeight() + c.WaitUntilHeight(currentHeight + heightsToWait) } func (c *Config) SendIBC(dstChain *Config, recipient string, token sdk.Coin) { diff --git a/tests/e2e/configurer/chain/node.go b/tests/e2e/configurer/chain/node.go index ee47b17f43e..5bb1a9c7a0c 100644 --- a/tests/e2e/configurer/chain/node.go +++ b/tests/e2e/configurer/chain/node.go @@ -92,22 +92,20 @@ func (n *NodeConfig) Stop() error { // WaitUntil waits until node reaches doneCondition. Return nil // if reached, error otherwise. -func (n *NodeConfig) WaitUntil(doneCondition func(syncInfo coretypes.SyncInfo) bool) error { +func (n *NodeConfig) WaitUntil(doneCondition func(syncInfo coretypes.SyncInfo) bool) { var latestBlockHeight int64 for i := 0; i < waitUntilrepeatMax; i++ { status, err := n.rpcClient.Status(context.Background()) - if err != nil { - return err - } + require.NoError(n.t, err) latestBlockHeight = status.SyncInfo.LatestBlockHeight // let the node produce a few blocks if !doneCondition(status.SyncInfo) { time.Sleep(waitUntilRepeatPauseTime) continue } - return nil + return } - return fmt.Errorf("node %s timed out waiting for condition, latest block height was %d", n.Name, latestBlockHeight) + n.t.Errorf("node %s timed out waiting for condition, latest block height was %d", n.Name, latestBlockHeight) } func (n *NodeConfig) extractOperatorAddressIfValidator() error { diff --git a/tests/e2e/configurer/chain/queries.go b/tests/e2e/configurer/chain/queries.go index 9cadfafae6f..c7f10a82fba 100644 --- a/tests/e2e/configurer/chain/queries.go +++ b/tests/e2e/configurer/chain/queries.go @@ -19,6 +19,7 @@ import ( "github.com/osmosis-labs/osmosis/v12/tests/e2e/util" epochstypes "github.com/osmosis-labs/osmosis/v12/x/epochs/types" superfluidtypes "github.com/osmosis-labs/osmosis/v12/x/superfluid/types" + twapqueryproto 
"github.com/osmosis-labs/osmosis/v12/x/twap/client/queryproto" ) func (n *NodeConfig) QueryGRPCGateway(path string, parameters ...string) ([]byte, error) { @@ -145,6 +146,49 @@ func (n *NodeConfig) QueryCurrentEpoch(identifier string) int64 { return response.CurrentEpoch } +func (n *NodeConfig) QueryArithmeticTwapToNow(poolId uint64, baseAsset, quoteAsset string, startTime time.Time) (sdk.Dec, error) { + path := "osmosis/twap/v1beta1/ArithmeticTwapToNow" + + bz, err := n.QueryGRPCGateway( + path, + "pool_id", strconv.FormatInt(int64(poolId), 10), + "base_asset", baseAsset, + "quote_asset", quoteAsset, + "start_time", startTime.Format(time.RFC3339Nano), + ) + + if err != nil { + return sdk.Dec{}, err + } + + var response twapqueryproto.ArithmeticTwapToNowResponse + err = util.Cdc.UnmarshalJSON(bz, &response) + require.NoError(n.t, err) // this error should not happen + return response.ArithmeticTwap, nil +} + +func (n *NodeConfig) QueryArithmeticTwap(poolId uint64, baseAsset, quoteAsset string, startTime time.Time, endTime time.Time) (sdk.Dec, error) { + path := "osmosis/twap/v1beta1/ArithmeticTwap" + + bz, err := n.QueryGRPCGateway( + path, + "pool_id", strconv.FormatInt(int64(poolId), 10), + "base_asset", baseAsset, + "quote_asset", quoteAsset, + "start_time", startTime.Format(time.RFC3339Nano), + "end_time", endTime.Format(time.RFC3339Nano), + ) + + if err != nil { + return sdk.Dec{}, err + } + + var response twapqueryproto.ArithmeticTwapResponse + err = util.Cdc.UnmarshalJSON(bz, &response) + require.NoError(n.t, err) // this error should not happen + return response.ArithmeticTwap, nil +} + // QueryHashFromBlock gets block hash at a specific height. Otherwise, error. 
func (n *NodeConfig) QueryHashFromBlock(height int64) (string, error) { block, err := n.rpcClient.Block(context.Background(), &height) @@ -161,6 +205,13 @@ func (n *NodeConfig) QueryCurrentHeight() int64 { return status.SyncInfo.LatestBlockHeight } +// QueryLatestBlockTime returns the latest block time. +func (n *NodeConfig) QueryLatestBlockTime() time.Time { + status, err := n.rpcClient.Status(context.Background()) + require.NoError(n.t, err) + return status.SyncInfo.LatestBlockTime +} + // QueryListSnapshots gets all snapshots currently created for a node. func (n *NodeConfig) QueryListSnapshots() ([]*tmabcitypes.Snapshot, error) { abciResponse, err := n.rpcClient.ABCIQuery(context.Background(), "/app/snapshots", nil) diff --git a/tests/e2e/configurer/upgrade.go b/tests/e2e/configurer/upgrade.go index ab2aec424f2..e0345bc1be8 100644 --- a/tests/e2e/configurer/upgrade.go +++ b/tests/e2e/configurer/upgrade.go @@ -172,9 +172,7 @@ func (uc *UpgradeConfigurer) runProposalUpgrade() error { // wait till all chains halt at upgrade height for _, chainConfig := range uc.chainConfigs { uc.t.Logf("waiting to reach upgrade height on chain %s", chainConfig.Id) - if err := chainConfig.WaitUntilHeight(chainConfig.UpgradePropHeight); err != nil { - return err - } + chainConfig.WaitUntilHeight(chainConfig.UpgradePropHeight) uc.t.Logf("upgrade height reached on chain %s", chainConfig.Id) } @@ -200,9 +198,7 @@ func (uc *UpgradeConfigurer) runProposalUpgrade() error { func (uc *UpgradeConfigurer) runForkUpgrade() error { for _, chainConfig := range uc.chainConfigs { uc.t.Logf("waiting to reach fork height on chain %s", chainConfig.Id) - if err := chainConfig.WaitUntilHeight(uc.forkHeight); err != nil { - return err - } + chainConfig.WaitUntilHeight(uc.forkHeight) uc.t.Logf("fork height reached on chain %s", chainConfig.Id) } return nil @@ -221,9 +217,7 @@ func (uc *UpgradeConfigurer) upgradeContainers(chainConfig *chain.Config, propHe } uc.t.Logf("waiting to upgrade containers on 
chain %s", chainConfig.Id) - if err := chainConfig.WaitUntilHeight(propHeight); err != nil { - return err - } + chainConfig.WaitUntilHeight(propHeight) uc.t.Logf("upgrade successful on chain %s", chainConfig.Id) return nil } diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 4cccc8ebc59..3abe7398181 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -13,6 +13,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" coretypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/osmosis-labs/osmosis/v12/app/apptesting/osmoassert" appparams "github.com/osmosis-labs/osmosis/v12/app/params" "github.com/osmosis-labs/osmosis/v12/tests/e2e/configurer/config" "github.com/osmosis-labs/osmosis/v12/tests/e2e/initialization" @@ -149,20 +150,21 @@ func (s *IntegrationTestSuite) TestAddToExistingLock() { // The records are guranteed to be pruned at the next epoch // because twap keep time = epoch time / 4 and we use a timer // to wait for at least the twap keep time. -// TODO: implement querying for TWAP, once such queries are exposed: -// https://github.com/osmosis-labs/osmosis/issues/2602 func (s *IntegrationTestSuite) TestTWAP() { const ( poolFile = "nativeDenomPool.json" walletName = "swap-exact-amount-in-wallet" - coinIn = "101stake" - minAmountOut = "99" - denomOut = "uosmo" + denomOne = "stake" + denomTwo = "uosmo" + + minAmountOut = "1" epochIdentifier = "day" ) + var coinIn = fmt.Sprintf("20000%s", denomOne) + chainA := s.configurer.GetChainConfig(0) chainANode, err := chainA.GetDefaultNode() s.NoError(err) @@ -171,16 +173,66 @@ func (s *IntegrationTestSuite) TestTWAP() { poolId := chainANode.CreatePool(poolFile, initialization.ValidatorWalletName) swapWalletAddr := chainANode.CreateWallet(walletName) + timeBeforeSwap := chainANode.QueryLatestBlockTime() + // Wait for the next height so that the requested twap + // start time (timeBeforeSwap) is not equal to the block time. 
+ chainA.WaitForNumHeights(1) + + s.T().Log("querying for the first TWAP to now before swap") + twapFromBeforeSwapToBeforeSwapOne, err := chainANode.QueryArithmeticTwapToNow(poolId, denomOne, denomTwo, timeBeforeSwap) + s.Require().NoError(err) + chainANode.BankSend(coinIn, chainA.NodeConfigs[0].PublicAddress, swapWalletAddr) - heightBeforeSwap := chainANode.QueryCurrentHeight() - // Triggers the creation of TWAP records. - chainANode.SwapExactAmountIn(coinIn, minAmountOut, fmt.Sprintf("%d", poolId), denomOut, swapWalletAddr) + s.T().Log("querying for the second TWAP to now before swap, must equal to first") + twapFromBeforeSwapToBeforeSwapTwo, err := chainANode.QueryArithmeticTwapToNow(poolId, denomOne, denomTwo, timeBeforeSwap.Add(50*time.Millisecond)) + s.Require().NoError(err) + + // Since there were no swaps between the two queries, the TWAPs should be the same. + osmoassert.DecApproxEq(s.T(), twapFromBeforeSwapToBeforeSwapOne, twapFromBeforeSwapToBeforeSwapTwo, sdk.NewDecWithPrec(1, 3)) + + s.T().Log("performing swap") + chainANode.SwapExactAmountIn(coinIn, minAmountOut, fmt.Sprintf("%d", poolId), denomTwo, swapWalletAddr) keepPeriodCountDown := time.NewTimer(initialization.TWAPPruningKeepPeriod) - // Make sure still producing blocks. - chainA.WaitUntilHeight(heightBeforeSwap + 3) + // Make sure that we are still producing blocks and move far enough for the swap TWAP record to be created + // so that we can measure start time post-swap (timeAfterSwap). + chainA.WaitForNumHeights(2) + + // Measure time after swap and wait for a few blocks to be produced. + // This is needed to ensure that start time is before the block time + // when we query for TWAP. + timeAfterSwap := chainANode.QueryLatestBlockTime() + chainA.WaitForNumHeights(2) + + // TWAP "from before to after swap" should be different from "from before to before swap" + // because swap introduces a new record with a different spot price. 
+ s.T().Log("querying for the TWAP from before swap to now after swap, it must be less than the TWAPs before swap") + twapFromBeforeSwapToAfterSwap, err := chainANode.QueryArithmeticTwapToNow(poolId, denomOne, denomTwo, timeBeforeSwap) + s.Require().NoError(err) + // We had a swap of 20000stake for some amount of uosmo. + // TWAP base is stake, quote is uosmo. + // After swap, we should be getting fewer uosmo for 1 stake. + s.Require().True(twapFromBeforeSwapToAfterSwap.LT(twapFromBeforeSwapToBeforeSwapOne)) + + s.T().Log("querying for the TWAP from after swap to now, must less than TWAPs before swap") + twapFromAfterToNow, err := chainANode.QueryArithmeticTwapToNow(poolId, denomOne, denomTwo, timeAfterSwap) + s.Require().NoError(err) + // Because twapFromAfterToNow has a higher time weight for the after swap period between the two, + // we expect twapFromBeforeSwapToAfterSwap to be larger than twapFromAfterToNow + s.Require().True(twapFromBeforeSwapToAfterSwap.GT(twapFromAfterToNow)) + + s.T().Log("querying for the TWAP from after swap to after swap + 10ms, must be less than TWAPs before swap") + twapAfterSwapBeforePruning10Ms, err := chainANode.QueryArithmeticTwap(poolId, denomOne, denomTwo, timeAfterSwap, timeAfterSwap.Add(10*time.Millisecond)) + s.Require().NoError(err) + // Again, because twapAfterSwapBeforePruning10Ms has a higher time weight for the after swap period between the two, + // we expect twapFromBeforeSwapToAfterSwap to be larger than twapAfterSwapBeforePruning10Ms + s.Require().True(twapFromBeforeSwapToAfterSwap.GT(twapAfterSwapBeforePruning10Ms)) + + // These must be equal because they are calculated over time ranges with the stable and equal spot price. + // There are potential rounding errors requiring us to approximate the comparison. 
+ osmoassert.DecApproxEq(s.T(), twapAfterSwapBeforePruning10Ms, twapFromAfterToNow, sdk.NewDecWithPrec(1, 3)) if !s.skipUpgrade { // TODO: we should reduce the pruning time in the v11 @@ -195,13 +247,30 @@ func (s *IntegrationTestSuite) TestTWAP() { // Make sure that the pruning keep period has passed. s.T().Logf("waiting for pruning keep period of (%.f) seconds to pass", initialization.TWAPPruningKeepPeriod.Seconds()) <-keepPeriodCountDown.C - oldEpochNumber := chainANode.QueryCurrentEpoch(epochIdentifier) - // The pruning should happen at the next epoch. - chainANode.WaitUntil(func(_ coretypes.SyncInfo) bool { - newEpochNumber := chainANode.QueryCurrentEpoch(epochIdentifier) - s.T().Logf("Current epoch number is (%d), waiting to reach (%d)", newEpochNumber, oldEpochNumber+1) - return newEpochNumber > oldEpochNumber - }) + + // Epoch end triggers the pruning of TWAP records. + // Records before swap should be pruned. + chainA.WaitForNumEpochs(1, epochIdentifier) + + // We should not be able to get TWAP before swap since it should have been pruned. + s.T().Log("pruning is now complete, querying TWAP for period that should be pruned") + _, err = chainANode.QueryArithmeticTwapToNow(poolId, denomOne, denomTwo, timeBeforeSwap) + s.Require().ErrorContains(err, "too old") + + // TWAPs for the same time range should be the same when we query for them before and after pruning. + s.T().Log("querying for TWAP for period before pruning took place but should not have been pruned") + twapAfterPruning10ms, err := chainANode.QueryArithmeticTwap(poolId, denomOne, denomTwo, timeAfterSwap, timeAfterSwap.Add(10*time.Millisecond)) + s.Require().NoError(err) + s.Require().Equal(twapAfterSwapBeforePruning10Ms, twapAfterPruning10ms) + + // TWAP "from after to after swap" should equal to "from after swap to after pruning" + // These must be equal because they are calculated over time ranges with the stable and equal spot price. 
+ timeAfterPruning := chainANode.QueryLatestBlockTime() + s.T().Log("querying for TWAP from after swap to after pruning") + twapToNowPostPruning, err := chainANode.QueryArithmeticTwap(poolId, denomOne, denomTwo, timeAfterSwap, timeAfterPruning) + s.Require().NoError(err) + // There are potential rounding errors requiring us to approximate the comparison. + osmoassert.DecApproxEq(s.T(), twapToNowPostPruning, twapAfterSwapBeforePruning10Ms, sdk.NewDecWithPrec(1, 3)) } func (s *IntegrationTestSuite) TestStateSync() {