diff --git a/mempool/mempool.go b/mempool/mempool.go
index 15b45b285..9b6612614 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/ethereum/go-ethereum/common"
 	ethtypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/holiman/uint256"
 
@@ -424,6 +425,16 @@ func (m *ExperimentalEVMMempool) HasEventBus() bool {
 	return m.eventBus != nil
 }
 
+// Has returns true if the transaction with the given hash is already in the mempool.
+// It checks the EVM tx pool, which iterates through all of its sub-pools (currently only the legacy pool).
+func (m *ExperimentalEVMMempool) Has(hash common.Hash) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	// Check the tx pool
+	return m.txPool.Has(hash)
+}
+
 // Close unsubscribes from the CometBFT event bus and shuts down the mempool.
 func (m *ExperimentalEVMMempool) Close() error {
 	var errs []error
diff --git a/rpc/backend/call_tx.go b/rpc/backend/call_tx.go
index 61dfc0b34..b505d3510 100644
--- a/rpc/backend/call_tx.go
+++ b/rpc/backend/call_tx.go
@@ -17,6 +17,7 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/cosmos/evm/mempool"
+	"github.com/cosmos/evm/mempool/txpool"
 	rpctypes "github.com/cosmos/evm/rpc/types"
 	evmtypes "github.com/cosmos/evm/x/vm/types"
 
@@ -148,6 +149,12 @@ func (b *Backend) SendRawTransaction(data hexutil.Bytes) (common.Hash, error) {
 
 	txHash := ethereumTx.AsTransaction().Hash()
 
+	// Check if the transaction is already in the mempool before broadcasting,
+	// so that user-submitted JSON-RPC transactions get explicit duplicate feedback.
+	if b.Mempool != nil && b.Mempool.Has(txHash) {
+		return txHash, txpool.ErrAlreadyKnown
+	}
+
 	syncCtx := b.ClientCtx.WithBroadcastMode(flags.BroadcastSync)
 	rsp, err := syncCtx.BroadcastTx(txBytes)
 	if rsp != nil && rsp.Code != 0 {
diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go
index 50ab0c319..4b2609c99 100644
--- a/tests/systemtests/main_test.go
+++ b/tests/systemtests/main_test.go
@@ -3,11 +3,11 @@ package systemtests
 import (
-	"testing"
-
 	"github.com/cosmos/evm/tests/systemtests/accountabstraction"
 	"github.com/cosmos/evm/tests/systemtests/chainupgrade"
 	"github.com/cosmos/evm/tests/systemtests/eip712"
+	"testing"
+
 	"github.com/cosmos/evm/tests/systemtests/mempool"
 
 	"github.com/cosmos/evm/tests/systemtests/suite"
@@ -19,7 +19,7 @@ func TestMain(m *testing.M) {
 }
 
 /*
- * Mempool Tests
+* Mempool Tests
 */
 func TestMempoolTxsOrdering(t *testing.T) {
 	suite.RunWithSharedSuite(t, mempool.RunTxsOrdering)
@@ -41,8 +41,8 @@ func TestMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) {
 	suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee)
 }
 
-func TestMempoolTxRebroadcasting(t *testing.T) {
-	suite.RunWithSharedSuite(t, mempool.RunTxRebroadcasting)
+func TestMempoolTxBroadcasting(t *testing.T) {
+	suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting)
 }
 
 func TestMinimumGasPricesZero(t *testing.T) {
@@ -53,9 +53,9 @@ func TestMempoolCosmosTxsCompatibility(t *testing.T) {
 	suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility)
 }
 
-/*
- * EIP-712 Tests
- */
+// /*
+// * EIP-712 Tests
+// */
 func TestEIP712BankSend(t *testing.T) {
 	suite.RunWithSharedSuite(t, eip712.RunEIP712BankSend)
 }
@@ -69,14 +69,14 @@ func TestEIP712MultipleBankSends(t *testing.T) {
 }
 
 /*
- * Account Abstraction Tests
+* Account Abstraction Tests
 */
 func TestAccountAbstractionEIP7702(t *testing.T) {
 	suite.RunWithSharedSuite(t, accountabstraction.RunEIP7702)
}
 
 /*
- * Chain Upgrade Tests
+* Chain Upgrade 
Tests */ func TestChainUpgrade(t *testing.T) { suite.RunWithSharedSuite(t, chainupgrade.RunChainUpgrade) diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go new file mode 100644 index 000000000..a13c32063 --- /dev/null +++ b/tests/systemtests/mempool/test_broadcast.go @@ -0,0 +1,415 @@ +//go:build system_test + +package mempool + +import ( + "context" + "fmt" + "slices" + "testing" + "time" + + "github.com/test-go/testify/require" + + "github.com/cosmos/evm/tests/systemtests/suite" +) + +// RunTxBroadcasting tests transaction broadcasting and duplicate handling in a multi-node network. +// +// This test verifies two critical aspects of transaction propagation: +// +// 1. Mempool Broadcasting: Transactions submitted to one node are gossiped to all other nodes +// via the mempool gossip protocol BEFORE blocks are produced. +// +// 2. Duplicate Detection: The RPC layer properly rejects duplicate transactions submitted +// by users via JSON-RPC (returning txpool.ErrAlreadyKnown), while internal gossip remains silent. +// +// The test uses a 5-second consensus timeout to create a larger window for verifying that +// transactions appear in other nodes' mempools before blocks are committed. +func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { + testCases := []struct { + name string + actions []func(*TestSuite, *TestContext) + }{ + { + // Scenario 1: Basic Broadcasting and Transaction Promotion + // + // This scenario verifies that: + // 1. Transactions are gossiped to all nodes BEFORE blocks are committed + // 2. Queued transactions (with nonce gaps) are NOT gossiped + // 3. When gaps are filled, queued txs are promoted and then gossiped + // + // Broadcasting Flow: + // User -> JSON-RPC -> Mempool (pending) -> Gossip to peers + // When nonce gap filled: Queued -> Promoted to pending -> Gossiped + name: "tx broadcast to other nodes %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with nonce 0 to node0 + // Expected: tx is added to node0's pending pool (nonce is correct) + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Step 2: Verify tx appears in nodes 1, 2, 3 mempools within 3 seconds + // Expected: tx is gossiped to all nodes BEFORE any block is committed (5s timeout) + // This proves mempool gossip works, not just block propagation + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + for _, nodeIdx := range []int{1, 2, 3} { + func(nodeIdx int) { + nodeID := s.Node(nodeIdx) + found := false + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to %s within %s - mempool gossip may not be working", + tx1.TxHash, nodeID, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + if err != nil { + // Retry on error + continue + } + + if slices.Contains(pendingTxs, tx1.TxHash) { + t.Logf("✓ Transaction %s successfully broadcast to %s", tx1.TxHash, nodeID) + found = true + } + } + } + }(nodeIdx) + } + + // Now set expected state and let the transaction commit normally + ctx.SetExpPendingTxs(tx1) + }, + func(s 
*TestSuite, ctx *TestContext) { + // Step 3: Send tx with nonce 2 to node1 (creating a nonce gap) + // Current nonce is 1 (after previous tx), so nonce 2 creates a gap + // Expected: tx is added to node1's QUEUED pool (not pending due to gap) + signer := s.Acc(0) + + // Send tx with nonce +2 to node1 (creating a gap since current nonce is 1) + tx3, err := s.SendTx(t, s.Node(1), signer.ID, 2, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx with nonce 2") + + // Step 4: Verify tx is in node1's QUEUED pool (not pending) + // Queued txs cannot execute until the nonce gap is filled + maxWaitTime := 2 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + queuedOnNode1 := false + for !queuedOnNode1 { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not queued on node1 within %s", + tx3.TxHash, maxWaitTime, + )) + case <-ticker.C: + _, queuedTxs, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + + if slices.Contains(queuedTxs, tx3.TxHash) { + t.Logf("✓ Transaction %s is queued on node1 (as expected due to nonce gap)", tx3.TxHash) + queuedOnNode1 = true + } + } + } + + // Step 5: Verify queued tx is NOT gossiped to other nodes + // Queued txs should stay local until they become pending (executable) + // This prevents network spam from non-executable transactions + time.Sleep(1 * time.Second) // Give some time for any potential gossip + + for _, nodeIdx := range []int{0, 2, 3} { + nodeID := s.Node(nodeIdx) + pendingTxs, queuedTxs, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + require.NoError(t, err, "failed to get txpool content from %s", nodeID) + + require.NotContains(t, pendingTxs, tx3.TxHash, + "queued transaction should not be in pending pool of %s", nodeID) + require.NotContains(t, queuedTxs, tx3.TxHash, + "queued transaction should not be broadcast to %s", nodeID) + } + + // Step 6: Send tx with nonce +1 to node2 (filling the gap) + // Expected: tx is added to node2's pending pool and gossiped + tx2, err := s.SendTx(t, s.Node(2), signer.ID, 1, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx with nonce 1") + + // Step 7: Verify BOTH txs appear in all nodes' pending pools + // - tx2 (nonce=1) should be gossiped immediately (it's pending) + // - tx3 (nonce=2) should be promoted from queued to pending on node1 + // - Promoted tx3 should then be gossiped to all other nodes + // This proves queued txs get rebroadcast when promoted + maxWaitTime = 3 * time.Second + ticker2 := time.NewTicker(checkInterval) + defer ticker2.Stop() + + for _, nodeIdx := range []int{0, 1, 3} { + func(nodeIdx int) { + nodeID := s.Node(nodeIdx) + foundTx2 := false + foundTx3 := false + + timeoutCtx2, cancel2 := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel2() + + for !foundTx2 || !foundTx3 { + select { + case <-timeoutCtx2.Done(): + if !foundTx2 { + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to %s within %s", + tx2.TxHash, nodeID, maxWaitTime, + )) + } + if !foundTx3 { + require.FailNow(t, fmt.Sprintf( + "transaction %s (promoted from queued) was not broadcast to %s within %s", + tx3.TxHash, nodeID, maxWaitTime, + )) + } + case <-ticker2.C: + pendingTxs, _, err := s.TxPoolContent(nodeID, suite.TxTypeEVM, 5*time.Second) + if err != nil { + 
continue + } + + if !foundTx2 && slices.Contains(pendingTxs, tx2.TxHash) { + t.Logf("✓ Transaction %s broadcast to %s", tx2.TxHash, nodeID) + foundTx2 = true + } + + if !foundTx3 && slices.Contains(pendingTxs, tx3.TxHash) { + t.Logf("✓ Transaction %s (promoted) broadcast to %s", tx3.TxHash, nodeID) + foundTx3 = true + } + } + } + }(nodeIdx) + } + + ctx.SetExpPendingTxs(tx2, tx3) + }, + }, + }, + { + // Scenario 2: Duplicate Detection on Same Node + // + // This scenario verifies that when a user submits the same transaction twice + // to the same node via JSON-RPC, the second submission returns an error. + // + // Error Handling: + // RPC Layer: Checks mempool.Has(txHash) -> returns txpool.ErrAlreadyKnown + // Users get immediate error feedback (not silent failure) + name: "duplicate tx rejected on same node %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with the current nonce to node0 + // Expected: tx is accepted and added to pending pool + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Step 2: Verify tx is in node0's pending pool + // Poll for the transaction to appear (it should be fast, but we need to wait for async processing) + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + found := false + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not found in node0's pending pool within %s", + tx1.TxHash, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(s.Node(0), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + if slices.Contains(pendingTxs, tx1.TxHash) { + found = true + } + } + } + + // Step 3: Send the SAME transaction again to node0 via JSON-RPC + // Expected: Error returned (txpool.ErrAlreadyKnown) + // Users must receive error feedback for duplicate submissions + _, err = s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC must return error") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + t.Logf("✓ Duplicate transaction correctly rejected with 'already known' error") + + ctx.SetExpPendingTxs(tx1) + }, + }, + }, + { + // Scenario 3: Duplicate Detection After Gossip + // + // This scenario verifies that even when a node receives a transaction via + // internal gossip (not user submission), attempting to submit that same + // transaction again via JSON-RPC returns an error. + // + // Flow: + // 1. User submits tx to node0 -> added to mempool -> gossiped to node1 + // 2. User tries to submit same tx to node1 via JSON-RPC + // 3. RPC layer detects duplicate (mempool.Has) and returns error + // + // This ensures duplicate detection works regardless of how the node + // originally received the transaction (user submission vs gossip). 
+ name: "duplicate tx rejected after gossip %s", + actions: []func(*TestSuite, *TestContext){ + func(s *TestSuite, ctx *TestContext) { + // Step 1: Send tx with the current nonce to node0 + // Expected: tx is accepted, added to pending, and gossiped + signer := s.Acc(0) + + // Send transaction to node0 + tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.NoError(t, err, "failed to send tx to node0") + + // Step 2: Wait for tx to be gossiped to node1 + // Expected: tx appears in node1's pending pool within 3 seconds + maxWaitTime := 3 * time.Second + checkInterval := 100 * time.Millisecond + + timeoutCtx, cancel := context.WithTimeout(context.Background(), maxWaitTime) + defer cancel() + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + + found := false + for !found { + select { + case <-timeoutCtx.Done(): + require.FailNow(t, fmt.Sprintf( + "transaction %s was not broadcast to node1 within %s", + tx1.TxHash, maxWaitTime, + )) + case <-ticker.C: + pendingTxs, _, err := s.TxPoolContent(s.Node(1), suite.TxTypeEVM, 5*time.Second) + if err != nil { + continue + } + if slices.Contains(pendingTxs, tx1.TxHash) { + t.Logf("✓ Transaction %s broadcast to node1 via gossip", tx1.TxHash) + found = true + } + } + } + + // Step 3: Try to send the SAME transaction to node1 via JSON-RPC + // Expected: Error returned (txpool.ErrAlreadyKnown) + // Even though node1 received it via gossip (not user submission), + // the RPC layer should still detect and reject the duplicate + _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(10), nil) + require.Error(t, err, "duplicate tx via JSON-RPC should return error even after gossip") + require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + + t.Logf("✓ JSON-RPC correctly rejects duplicate that node already has from gossip") + + ctx.SetExpPendingTxs(tx1) + }, + }, + }, + } + + testOptions := []*suite.TestOptions{ + { + Description: "EVM LegacyTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: false, + }, + { + Description: "EVM DynamicFeeTx", + TxType: suite.TxTypeEVM, + IsDynamicFeeTx: true, + }, + } + + s := NewTestSuite(base) + + // First, setup the chain with default configuration + s.SetupTest(t) + + // Now modify the consensus timeout to slow down block production + // This gives us time to verify broadcasting happens before blocks are committed + s.ModifyConsensusTimeout(t, "5s") + + for _, to := range testOptions { + s.SetOptions(to) + for _, tc := range testCases { + testName := fmt.Sprintf(tc.name, to.Description) + t.Run(testName, func(t *testing.T) { + // Await a block before starting the test case (ensures clean state) + s.AwaitNBlocks(t, 1) + + ctx := NewTestContext() + s.BeforeEachCase(t, ctx) + + // Capture the initial block height - no blocks should be produced during the test case + initialHeight := s.GetCurrentBlockHeight(t, "node0") + t.Logf("Test case starting at block height %d", initialHeight) + + // Execute all test actions (broadcasting, mempool checks, etc.) 
+ for _, action := range tc.actions { + action(s, ctx) + // NOTE: We don't call AfterEachAction here because we're manually + // checking the mempool state in the action functions + } + + // Verify no blocks were produced during the test case + // All broadcasting and mempool checks should happen within a single block period + currentHeight := s.GetCurrentBlockHeight(t, "node0") + require.Equal(t, initialHeight, currentHeight, + "No blocks should be produced during test case execution - expected height %d but got %d", + initialHeight, currentHeight) + t.Logf("✓ Test case completed at same block height %d (no blocks produced)", currentHeight) + + // Now await a block to allow transactions to commit + s.AwaitNBlocks(t, 1) + t.Logf("Awaited block for transaction commits") + + // Verify transactions committed successfully + s.AfterEachCase(t, ctx) + }) + } + } +} diff --git a/tests/systemtests/mempool/test_exceptions.go b/tests/systemtests/mempool/test_exceptions.go index 0a2063e0b..459231ebc 100644 --- a/tests/systemtests/mempool/test_exceptions.go +++ b/tests/systemtests/mempool/test_exceptions.go @@ -10,90 +10,6 @@ import ( "github.com/test-go/testify/require" ) -func RunTxRebroadcasting(t *testing.T, base *suite.BaseTestSuite) { - testCases := []struct { - name string - actions []func(*TestSuite, *TestContext) - }{ - { - name: "ordering of pending txs %s", - actions: []func(*TestSuite, *TestContext){ - func(s *TestSuite, ctx *TestContext) { - signer := s.Acc(0) - - tx1, err := s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx2, err := s.SendTx(t, s.Node(1), signer.ID, 1, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx3, err := s.SendTx(t, s.Node(2), signer.ID, 2, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // Skip tx4 with nonce 3 - - tx5, err := s.SendTx(t, s.Node(3), signer.ID, 4, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - tx6, err := s.SendTx(t, s.Node(0), signer.ID, 5, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // At AfterEachAction hook, we will check expected queued txs are not broadcasted. - ctx.SetExpPendingTxs(tx1, tx2, tx3) - ctx.SetExpQueuedTxs(tx5, tx6) - }, - func(s *TestSuite, ctx *TestContext) { - // Wait for 3 blocks. - // It is because tx1, tx2, tx3 are sent to different nodes, tx3 needs maximum 3 blocks to be committed. - // e.g. node3 is 1st proposer -> tx3 will tale 1 block to be committed. - // e.g. node3 is 3rd proposer -> tx3 will take 3 blocks to be committed. - s.AwaitNBlocks(t, 3) - - // current nonce is 3. - // so, we should set nonce idx to 0. - nonce3Idx := uint64(0) - - signer := s.Acc(0) - - tx4, err := s.SendTx(t, s.Node(2), signer.ID, nonce3Idx, s.GasPriceMultiplier(10), nil) - require.NoError(t, err, "failed to send tx") - - // At AfterEachAction hook, we will check expected pending txs are broadcasted. 
- ctx.SetExpPendingTxs(tx4) - ctx.PromoteExpTxs(2) - }, - }, - }, - } - - testOptions := []*suite.TestOptions{ - { - Description: "EVM LegacyTx", - TxType: suite.TxTypeEVM, - IsDynamicFeeTx: false, - }, - } - - s := NewTestSuite(base) - s.SetupTest(t) - - for _, to := range testOptions { - s.SetOptions(to) - for _, tc := range testCases { - testName := fmt.Sprintf(tc.name, to.Description) - t.Run(testName, func(t *testing.T) { - ctx := NewTestContext() - s.BeforeEachCase(t, ctx) - for _, action := range tc.actions { - action(s, ctx) - s.AfterEachAction(t, ctx) - } - s.AfterEachCase(t, ctx) - }) - } - } -} - func RunMinimumGasPricesZero(t *testing.T, base *suite.BaseTestSuite) { testCases := []struct { name string diff --git a/tests/systemtests/mempool/test_suite.go b/tests/systemtests/mempool/test_suite.go index c77620262..4558b667e 100644 --- a/tests/systemtests/mempool/test_suite.go +++ b/tests/systemtests/mempool/test_suite.go @@ -3,9 +3,14 @@ package mempool import ( + "fmt" + "os" + "path/filepath" "testing" "time" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" "github.com/stretchr/testify/require" "github.com/cosmos/evm/tests/systemtests/suite" @@ -26,6 +31,16 @@ func (s *TestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { s.BaseTestSuite.SetupTest(t, nodeStartArgs...) } +// GetCurrentBlockHeight returns the current block height from the specified node +func (s *TestSuite) GetCurrentBlockHeight(t *testing.T, nodeID string) uint64 { + t.Helper() + account := s.EthAccount("acc0") + ctx, cli, _ := s.EthClient.Setup(nodeID, account) + blockNumber, err := cli.BlockNumber(ctx) + require.NoError(t, err, "failed to get block number from %s", nodeID) + return blockNumber +} + // BeforeEach resets the expected mempool state and retrieves the current base fee before each test case func (s *TestSuite) BeforeEachCase(t *testing.T, ctx *TestContext) { ctx.Reset() @@ -91,3 +106,66 @@ func (c *TestContext) PromoteExpTxs(count int) { c.ExpPending = append(c.ExpPending, promoted...) c.ExpQueued = c.ExpQueued[count:] } + +// ModifyConsensusTimeout modifies the consensus timeout_commit in the config.toml +// for all nodes and restarts the chain with the new configuration. +func (s *TestSuite) ModifyConsensusTimeout(t *testing.T, timeout string) { + t.Helper() + + // Stop the chain if running + if s.ChainStarted { + s.ResetChain(t) + } + + // Modify config.toml for each node + for i := 0; i < s.NodesCount(); i++ { + nodeDir := s.NodeDir(i) + configPath := filepath.Join(nodeDir, "config", "config.toml") + + err := editToml(configPath, func(doc *tomledit.Document) { + setValue(doc, timeout, "consensus", "timeout_commit") + }) + require.NoError(t, err, "failed to modify config.toml for node %d", i) + } + + // Restart the chain with modified config + s.StartChain(t, suite.DefaultNodeArgs()...) 
+ s.AwaitNBlocks(t, 2) +} + +// editToml is a helper to edit TOML files +func editToml(filename string, f func(doc *tomledit.Document)) error { + tomlFile, err := os.OpenFile(filename, os.O_RDWR, 0o600) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + defer tomlFile.Close() + + doc, err := tomledit.Parse(tomlFile) + if err != nil { + return fmt.Errorf("failed to parse toml: %w", err) + } + + f(doc) + + if _, err := tomlFile.Seek(0, 0); err != nil { + return fmt.Errorf("failed to seek: %w", err) + } + if err := tomlFile.Truncate(0); err != nil { + return fmt.Errorf("failed to truncate: %w", err) + } + if err := tomledit.Format(tomlFile, doc); err != nil { + return fmt.Errorf("failed to format: %w", err) + } + + return nil +} + +// setValue sets a value in a TOML document +func setValue(doc *tomledit.Document, newVal string, xpath ...string) { + e := doc.First(xpath...) + if e == nil { + panic(fmt.Sprintf("not found: %v", xpath)) + } + e.Value = parser.MustValue(fmt.Sprintf("%q", newVal)) +}
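For JSON-RPC callers, the practical effect of the SendRawTransaction change above is that re-submitting a transaction the node already holds now fails fast with txpool.ErrAlreadyKnown instead of being silently re-broadcast. Below is a minimal client-side sketch of how a submitter might treat that response as a benign duplicate; it assumes go-ethereum's ethclient and matches on the "already known" error text that the system test above asserts on. The submitTx helper is illustrative only and not part of this diff.

package main

import (
	"context"
	"log"
	"strings"

	ethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// submitTx sends a signed transaction and treats an "already known" response
// as success: the node's mempool already holds this exact transaction.
// Sketch only; the error-string match mirrors the check used in the system tests above.
func submitTx(ctx context.Context, cli *ethclient.Client, tx *ethtypes.Transaction) error {
	err := cli.SendTransaction(ctx, tx)
	if err != nil && strings.Contains(err.Error(), "already known") {
		log.Printf("tx %s already in the node's mempool; skipping re-submit", tx.Hash())
		return nil
	}
	return err
}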