This repository has been archived by the owner on Apr 15, 2024. It is now read-only.

test: add e2e test for data commitment window change #491

Merged
merged 8 commits into from
Sep 27, 2023
2 changes: 1 addition & 1 deletion e2e/Dockerfile_e2e
@@ -16,6 +16,6 @@ RUN apk update && apk --no-cache add bash jq coreutils curl
COPY --from=builder /orchestrator-relayer/build/qgb /bin/qgb

# p2p port
-EXPOSE 30000
+EXPOSE 9090 26657 30000

CMD [ "/bin/qgb" ]
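Exposing 9090 (Cosmos SDK gRPC) and 26657 (Tendermint RPC) alongside the existing p2p port 30000 is what lets the new helpers in qgb_network.go reach the validator from the host. A minimal reachability sketch, assuming the compose file publishes these container ports to localhost as qgb_network.go expects:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// gRPC dialing is lazy, so this mainly validates the address format;
	// the RPC probe below is the real liveness check.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Tendermint RPC /status answers once the node is up.
	resp, err := http.Get("http://localhost:26657/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("tendermint rpc:", resp.Status)
}
```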
14 changes: 7 additions & 7 deletions e2e/celestia-app/config.toml
@@ -383,21 +383,21 @@ version = "v0"
wal_file = "data/cs.wal/wal"

# How long we wait for a proposal block before prevoting nil
timeout_propose = "50s"
timeout_propose = "500ms"
adlerjohn marked this conversation as resolved.
Show resolved Hide resolved
# How much timeout_propose increases with each round
timeout_propose_delta = "10ms"
timeout_propose_delta = "5ms"
# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
timeout_prevote = "100ms"
timeout_prevote = "5ms"
# How much the timeout_prevote increases with each round
timeout_prevote_delta = "10ms"
timeout_prevote_delta = "1ms"
# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
timeout_precommit = "100ms"
timeout_precommit = "5ms"
# How much the timeout_precommit increases with each round
timeout_precommit_delta = "10ms"
timeout_precommit_delta = "1ms"
# How long we wait after committing a block, before starting on the new
# height (this gives us a chance to receive some more precommits, even
# though we already have +2/3).
timeout_commit = "50ms"
timeout_commit = "1ms"

# How many blocks to look back to check existence of the node's consensus votes before joining consensus
# When non-zero, the node will panic upon restart
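The tightened timeouts make the e2e chain commit blocks in milliseconds instead of tens of seconds, which is what lets a single test sit through a full data commitment window plus a 30-second governance vote. A back-of-the-envelope bound, as a sketch (the 101-block window is the e2e genesis default; the timeouts are the new values above):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Worst-case single-round time per block under the tuned e2e timeouts.
	perBlock := 500*time.Millisecond + // timeout_propose
		5*time.Millisecond + // timeout_prevote
		5*time.Millisecond + // timeout_precommit
		1*time.Millisecond // timeout_commit

	const window = 101 // default data commitment window in the e2e genesis
	fmt.Println("worst-case window duration:", window*perBlock) // ~51.6s
}
```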
10 changes: 5 additions & 5 deletions e2e/celestia-app/genesis.json
@@ -255,18 +255,18 @@
"min_deposit": [
{
"denom": "utia",
"amount": "10000000"
"amount": "100"
}
],
"max_deposit_period": "172800s"
},
"voting_params": {
"voting_period": "172800s"
"voting_period": "30s"
},
"tally_params": {
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
"quorum": "0.000001",
"threshold": "0.000001",
"veto_threshold": "0.001"
}
},
"ibc": {
10 changes: 5 additions & 5 deletions e2e/celestia-app/genesis_template.json
@@ -255,18 +255,18 @@
"min_deposit": [
{
"denom": "utia",
"amount": "10000000"
"amount": "100"
}
],
"max_deposit_period": "172800s"
},
"voting_params": {
"voting_period": "172800s"
"voting_period": "30s"
},
"tally_params": {
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
"quorum": "0.000001",
"threshold": "0.000001",
"veto_threshold": "0.001"
}
},
"ibc": {
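Both genesis.json and its template get the same governance overrides: a tiny minimum deposit, a 30-second voting period, and near-zero quorum/threshold, so a single validator's yes vote can pass a parameter-change proposal almost immediately. A sketch of how a test could double-check the active voting params over gRPC (assuming the gov v1 query service at the localhost:9090 endpoint exposed above):

```go
package main

import (
	"context"
	"fmt"
	"log"

	v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Query the gov module's voting params and print the active period.
	gqc := v1.NewQueryClient(conn)
	resp, err := gqc.Params(context.Background(), &v1.QueryParamsRequest{ParamsType: "voting"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("voting period:", resp.VotingParams.VotingPeriod) // expect 30s with this genesis
}
```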
29 changes: 17 additions & 12 deletions e2e/orchestrator_test.go
@@ -34,7 +34,9 @@ func TestOrchestratorWithOneValidator(t *testing.T) {
HandleNetworkError(t, network, err, false)

ctx := context.Background()
-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+50))
+window, err := network.GetCurrentDataCommitmentWindow(ctx)
+require.NoError(t, err)
+err = network.WaitForBlock(ctx, int64(window+50))
HandleNetworkError(t, network, err, false)

// create dht for querying
@@ -53,7 +55,7 @@ func TestOrchestratorWithOneValidator(t *testing.T) {
// give the orchestrators some time to catch up
time.Sleep(time.Second)

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+height))
+err = network.WaitForBlock(ctx, int64(window+height))
HandleNetworkError(t, network, err, false)

dcConfirm, err := network.GetDataCommitmentConfirmByHeight(ctx, dht, height, CORE0EVMADDRESS)
@@ -93,8 +95,9 @@ func TestOrchestratorWithTwoValidators(t *testing.T) {
HandleNetworkError(t, network, err, false)

ctx := context.Background()

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+50))
+window, err := network.GetCurrentDataCommitmentWindow(ctx)
+require.NoError(t, err)
+err = network.WaitForBlock(ctx, int64(window+50))
HandleNetworkError(t, network, err, false)

// create dht for querying
@@ -127,7 +130,7 @@ func TestOrchestratorWithTwoValidators(t *testing.T) {
// assert that it carries the right evm address
assert.Equal(t, CORE0EVMADDRESS, core0ValsetConfirm.EthAddress)

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+c0Height))
+err = network.WaitForBlock(ctx, int64(window+c0Height))
HandleNetworkError(t, network, err, false)

// check core0 submitted the data commitment confirm
@@ -143,7 +146,7 @@ func TestOrchestratorWithTwoValidators(t *testing.T) {
// assert that it carries the right evm address
assert.Equal(t, CORE0EVMADDRESS, core0DataCommitmentConfirm.EthAddress)

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+c1Height))
+err = network.WaitForBlock(ctx, int64(window+c1Height))
HandleNetworkError(t, network, err, false)

// check core1 submitted the data commitment confirm
@@ -168,8 +171,9 @@ func TestOrchestratorWithMultipleValidators(t *testing.T) {
HandleNetworkError(t, network, err, false)

ctx := context.Background()

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+50))
+window, err := network.GetCurrentDataCommitmentWindow(ctx)
+require.NoError(t, err)
+err = network.WaitForBlock(ctx, int64(window+50))
HandleNetworkError(t, network, err, false)

// create dht for querying
@@ -208,7 +212,7 @@ func TestOrchestratorWithMultipleValidators(t *testing.T) {
// assert that it carries the right evm address
assert.Equal(t, CORE0EVMADDRESS, core0ValsetConfirm.EthAddress)

-err = network.WaitForBlock(ctx, int64(network.DataCommitmentWindow+c0Height))
+err = network.WaitForBlock(ctx, int64(window+c0Height))
HandleNetworkError(t, network, err, false)

// check core0 submitted the data commitment confirm
@@ -260,8 +264,9 @@ func TestOrchestratorReplayOld(t *testing.T) {
HandleNetworkError(t, network, err, false)

ctx := context.Background()

-err = network.WaitForBlock(ctx, int64(2*network.DataCommitmentWindow))
+window, err := network.GetCurrentDataCommitmentWindow(ctx)
+require.NoError(t, err)
+err = network.WaitForBlock(ctx, int64(2*window))
HandleNetworkError(t, network, err, false)

// add core0 orchestrator
@@ -273,7 +278,7 @@ func TestOrchestratorReplayOld(t *testing.T) {
HandleNetworkError(t, network, err, false)

// give time for the orchestrators to submit confirms
-err = network.WaitForBlock(ctx, int64(2*network.DataCommitmentWindow+50))
+err = network.WaitForBlock(ctx, int64(2*window+50))
HandleNetworkError(t, network, err, false)

// create dht for querying
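The same substitution runs through all four tests: the hardcoded network.DataCommitmentWindow field is gone, and each test reads the window from the chain, so the waits stay correct even after a mid-test governance update. The recurring pattern after this change:

```go
// Read the live on-chain parameter instead of a field that had to be kept
// in sync with the genesis file by hand.
window, err := network.GetCurrentDataCommitmentWindow(ctx)
require.NoError(t, err)

// Then wait until a full window (plus some buffer blocks) has elapsed.
err = network.WaitForBlock(ctx, int64(window+50))
HandleNetworkError(t, network, err, false)
```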
182 changes: 161 additions & 21 deletions e2e/qgb_network.go
@@ -10,6 +10,15 @@ import (
"strings"
"time"

"github.com/celestiaorg/celestia-app/pkg/user"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
sdk "github.com/cosmos/cosmos-sdk/types"
v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
"github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
"github.com/cosmos/cosmos-sdk/x/params/types/proposal"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"

qgbwrapper "github.com/celestiaorg/quantum-gravity-bridge/v2/wrappers/QuantumGravityBridge.sol"

"github.com/celestiaorg/celestia-app/app"
@@ -28,16 +37,15 @@
)

type QGBNetwork struct {
-ComposePaths []string
-Identifier string
-Instance *testcontainers.LocalDockerCompose
-EVMRPC string
-TendermintRPC string
-CelestiaGRPC string
-P2PAddr string
-EncCfg encoding.Config
-DataCommitmentWindow uint64
-Logger tmlog.Logger
+ComposePaths []string
+Identifier string
+Instance *testcontainers.LocalDockerCompose
+EVMRPC string
+TendermintRPC string
+CelestiaGRPC string
+P2PAddr string
+EncCfg encoding.Config
+Logger tmlog.Logger

// used by the moderator to notify all the workers.
stopChan <-chan struct{}
@@ -54,17 +62,16 @@ func NewQGBNetwork() (*QGBNetwork, error) {
// and wanted to notify the moderator.
toStopChan := make(chan struct{}, 10)
network := &QGBNetwork{
-Identifier: id,
-ComposePaths: paths,
-Instance: instance,
-EVMRPC: "http://localhost:8545",
-TendermintRPC: "tcp://localhost:26657",
-CelestiaGRPC: "localhost:9090",
-P2PAddr: "localhost:30000",
-EncCfg: encoding.MakeConfig(app.ModuleEncodingRegisters...),
-DataCommitmentWindow: 101, // If this one is changed, make sure to change also the genesis file
-stopChan: stopChan,
-toStopChan: toStopChan,
+Identifier: id,
+ComposePaths: paths,
+Instance: instance,
+EVMRPC: "http://localhost:8545",
+TendermintRPC: "tcp://localhost:26657",
+CelestiaGRPC: "localhost:9090",
+P2PAddr: "localhost:30000",
+EncCfg: encoding.MakeConfig(app.ModuleEncodingRegisters...),
+stopChan: stopChan,
+toStopChan: toStopChan,
}

// moderate stop notifications from waiters.
@@ -882,6 +889,100 @@ func (network QGBNetwork) WaitForEventNonce(ctx context.Context, bridge *qgbwrap
}
}

func (network QGBNetwork) UpdateDataCommitmentWindow(ctx context.Context, newWindow uint64) error {
	fmt.Printf("updating data commitment window to %d\n", newWindow)
	kr, err := keyring.New(
		"qgb-tests",
		"test",
		"celestia-app/core0",
		nil,
		encoding.MakeConfig(app.ModuleEncodingRegisters...).Codec,
	)
	if err != nil {
		return err
	}
	qgbGRPC, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer qgbGRPC.Close()

	signer, err := user.SetupSingleSigner(ctx, kr, qgbGRPC, encoding.MakeConfig(app.ModuleEncodingRegisters...))
	if err != nil {
		return err
	}

	// create and submit a new param change proposal for the data commitment window
	change := proposal.NewParamChange(
		types.ModuleName,
		string(types.ParamsStoreKeyDataCommitmentWindow),
		fmt.Sprintf("\"%d\"", newWindow),
	)
	content := proposal.NewParameterChangeProposal(
		"data commitment window update",
		"description",
		[]proposal.ParamChange{change},
	)

	msg, err := v1beta1.NewMsgSubmitProposal(
		content,
		sdk.NewCoins(
			sdk.NewCoin(app.BondDenom, sdk.NewInt(5000000))),
		signer.Address(),
	)
	if err != nil {
		return err
	}

	_, err = signer.SubmitTx(ctx, []sdk.Msg{msg}, user.SetGasLimitAndFee(3000000, 300000))
	if err != nil {
		return err
	}

	// query the proposal to get the id
	gqc := v1.NewQueryClient(qgbGRPC)
	gresp, err := gqc.Proposals(
		ctx,
		&v1.QueryProposalsRequest{
			ProposalStatus: v1.ProposalStatus_PROPOSAL_STATUS_VOTING_PERIOD,
		},
	)
	if err != nil {
		return err
	}
	if len(gresp.Proposals) != 1 {
		return fmt.Errorf("expected exactly one proposal in voting period, got %d", len(gresp.Proposals))
	}

	// create and submit a new vote
	vote := v1.NewMsgVote(
		signer.Address(),
		gresp.Proposals[0].Id,
		v1.VoteOption_VOTE_OPTION_YES,
		"",
	)

	_, err = signer.SubmitTx(ctx, []sdk.Msg{vote}, user.SetGasLimitAndFee(3000000, 300000))
	if err != nil {
		return err
	}

	// wait out the rest of the voting period (30s in the e2e genesis)
	time.Sleep(25 * time.Second)

	// check that the parameter got updated as expected
	currentWindow, err := network.GetCurrentDataCommitmentWindow(ctx)
	if err != nil {
		return err
	}
	if currentWindow != newWindow {
		return fmt.Errorf("data commitment window was not updated: got %d, want %d", currentWindow, newWindow)
	}

	fmt.Println("updated data commitment window successfully")
	return nil
}
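With both helpers in place, the window-change e2e test itself can be sketched as follows. This is illustrative, not the exact test added by the PR: the test name, the halved window value, and the elided setup steps are assumptions; only NewQGBNetwork, GetCurrentDataCommitmentWindow, UpdateDataCommitmentWindow, WaitForBlock, and HandleNetworkError come from this diff.

```go
// Hypothetical sketch, in the e2e test package, of how the new helpers compose.
func TestDataCommitmentWindowChange(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping data commitment window change test in short mode")
	}

	network, err := NewQGBNetwork()
	HandleNetworkError(t, network, err, false)
	// ... start the network and the orchestrators as in the tests above ...

	ctx := context.Background()
	window, err := network.GetCurrentDataCommitmentWindow(ctx)
	require.NoError(t, err)

	// Halve the window via a governance parameter-change proposal
	// (the helper votes and verifies the update internally).
	err = network.UpdateDataCommitmentWindow(ctx, window/2)
	require.NoError(t, err)

	// Attestations should keep flowing under the new, smaller window.
	err = network.WaitForBlock(ctx, int64(window+50))
	HandleNetworkError(t, network, err, false)
}
```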

func (network QGBNetwork) PrintLogs() {
_ = network.Instance.
WithCommand([]string{"logs"}).
@@ -903,3 +1004,42 @@ func (network QGBNetwork) GetLatestValset(ctx context.Context) (*types.Valset, e
}
return valset, nil
}

func (network QGBNetwork) GetCurrentDataCommitmentWindow(ctx context.Context) (uint64, error) {
	var window uint64
	queryFun := func() error {
		qgbGRPC, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			return err
		}
		defer qgbGRPC.Close()
		bqc := types.NewQueryClient(qgbGRPC)
		presp, err := bqc.Params(ctx, &types.QueryParamsRequest{})
		if err != nil {
			return err
		}
		window = presp.Params.DataCommitmentWindow
		return nil
	}
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	for {
		select {
		case <-network.stopChan:
			cancel()
			return 0, ErrNetworkStopped
		case <-ctx.Done():
			cancel()
			if errors.Is(ctx.Err(), context.DeadlineExceeded) {
				return 0, fmt.Errorf("couldn't query data commitment window")
			}
			return 0, ctx.Err()
		default:
			err := queryFun()
			if err == nil {
				cancel()
				return window, nil
			}
			time.Sleep(2 * time.Second)
		}
	}
}